VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@55979

Last change on this file since 55979 was 55899, checked in by vboxsync, 10 years ago

PGM: Added an access origin to memory read & write calls that respects handlers. This will later be passed to the access handler, so that things like the page pool (and potentially others) can query IEM about instruction details when needed.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 427.4 KB
1/* $Id: IEMAll.cpp 55899 2015-05-18 09:47:57Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78#define IEM_IMPLEMENTS_TASKSWITCH
79
80/*******************************************************************************
81* Header Files *
82*******************************************************************************/
83#define LOG_GROUP LOG_GROUP_IEM
84#include <VBox/vmm/iem.h>
85#include <VBox/vmm/cpum.h>
86#include <VBox/vmm/pdm.h>
87#include <VBox/vmm/pgm.h>
88#include <internal/pgm.h>
89#include <VBox/vmm/iom.h>
90#include <VBox/vmm/em.h>
91#include <VBox/vmm/hm.h>
92#include <VBox/vmm/tm.h>
93#include <VBox/vmm/dbgf.h>
94#include <VBox/vmm/dbgftrace.h>
95#ifdef VBOX_WITH_RAW_MODE_NOT_R0
96# include <VBox/vmm/patm.h>
97# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
98# include <VBox/vmm/csam.h>
99# endif
100#endif
101#include "IEMInternal.h"
102#ifdef IEM_VERIFICATION_MODE_FULL
103# include <VBox/vmm/rem.h>
104# include <VBox/vmm/mm.h>
105#endif
106#include <VBox/vmm/vm.h>
107#include <VBox/log.h>
108#include <VBox/err.h>
109#include <VBox/param.h>
110#include <VBox/dis.h>
111#include <VBox/disopcode.h>
112#include <iprt/assert.h>
113#include <iprt/string.h>
114#include <iprt/x86.h>
115
116
117
118/*******************************************************************************
119* Structures and Typedefs *
120*******************************************************************************/
121/** @typedef PFNIEMOP
122 * Pointer to an opcode decoder function.
123 */
124
125/** @def FNIEMOP_DEF
126 * Define an opcode decoder function.
127 *
128 * We're using macros for this so that adding and removing parameters as well as
129 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
130 *
131 * @param a_Name The function name.
132 */
133
134
135#if defined(__GNUC__) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
143
144#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
145typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
152
153#elif defined(__GNUC__)
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#else
163typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
164# define FNIEMOP_DEF(a_Name) \
165 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
170
171#endif
172
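/* Editor's illustrative sketch (not part of the original file): a minimal
   opcode decoder defined through FNIEMOP_DEF. The function name and body are
   assumptions for the example; real decoders elsewhere in IEM do considerably
   more work (mnemonic logging, RIP advancing, ModR/M handling, etc.). */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_nop)
{
    /* A real decoder would advance RIP past the opcode byte and complete the
       instruction; here we merely report success. */
    NOREF(pIemCpu);
    return VINF_SUCCESS;
}
#endif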
173
174/**
175 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
176 */
177typedef union IEMSELDESC
178{
179 /** The legacy view. */
180 X86DESC Legacy;
181 /** The long mode view. */
182 X86DESC64 Long;
183} IEMSELDESC;
184/** Pointer to a selector descriptor table entry. */
185typedef IEMSELDESC *PIEMSELDESC;
186
187
188/*******************************************************************************
189* Defined Constants And Macros *
190*******************************************************************************/
191/** Temporary hack to disable the double execution. Will be removed in favor
192 * of a dedicated execution mode in EM. */
193//#define IEM_VERIFICATION_MODE_NO_REM
194
195/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
196 * due to GCC lacking knowledge about the value range of a switch. */
197#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
198
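/* Editor's illustrative sketch (assumption, not from the original file): the
   typical pattern for using IEM_NOT_REACHED_DEFAULT_CASE_RET to keep GCC from
   warning about a "maybe uninitialized" variable after an exhaustive switch. */
#if 0 /* example only */
uint8_t cbValue;
switch (pIemCpu->enmEffOpSize)
{
    case IEMMODE_16BIT: cbValue = 2; break;
    case IEMMODE_32BIT: cbValue = 4; break;
    case IEMMODE_64BIT: cbValue = 8; break;
    IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
#endif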
199/**
200 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
201 * occasion.
202 */
203#ifdef LOG_ENABLED
204# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
205 do { \
206 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
207 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
208 } while (0)
209#else
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
212#endif
213
214/**
215 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
216 * occasion using the supplied logger statement.
217 *
218 * @param a_LoggerArgs What to log on failure.
219 */
220#ifdef LOG_ENABLED
221# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
222 do { \
223 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
224 /*LogFunc(a_LoggerArgs);*/ \
225 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
226 } while (0)
227#else
228# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
230#endif
231
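/* Editor's illustrative sketch (assumption): bailing out of a code path IEM
   does not implement yet, logging the reason before returning
   VERR_IEM_ASPECT_NOT_IMPLEMENTED to the caller. The condition and format
   arguments below are made up for the example. */
#if 0 /* example only */
if (fIsWeirdCornerCase) /* hypothetical condition */
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unsupported access fAccess=%#x\n", fAccess));
#endif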
232/**
233 * Call an opcode decoder function.
234 *
235 * We're using macros for this so that adding and removing parameters can be
236 * done as we please. See FNIEMOP_DEF.
237 */
238#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
239
240/**
241 * Call a common opcode decoder function taking one extra argument.
242 *
243 * We're using macros for this so that adding and removing parameters can be
244 * done as we please. See FNIEMOP_DEF_1.
245 */
246#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
247
248/**
249 * Call a common opcode decoder function taking two extra arguments.
250 *
251 * We're using macros for this so that adding and removing parameters can be
252 * done as we please. See FNIEMOP_DEF_2.
253 */
254#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
255
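/* Editor's illustrative sketch (assumption): how the decoder loop dispatches
   the opcode byte it just fetched through the one-byte opcode table using
   FNIEMOP_CALL; the fetch macro is defined further down in this file. */
#if 0 /* example only */
uint8_t b;
IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
#endif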
256/**
257 * Check if we're currently executing in real or virtual 8086 mode.
258 *
259 * @returns @c true if it is, @c false if not.
260 * @param a_pIemCpu The IEM state of the current CPU.
261 */
262#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
263
264/**
265 * Check if we're currently executing in virtual 8086 mode.
266 *
267 * @returns @c true if it is, @c false if not.
268 * @param a_pIemCpu The IEM state of the current CPU.
269 */
270#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
271
272/**
273 * Check if we're currently executing in long mode.
274 *
275 * @returns @c true if it is, @c false if not.
276 * @param a_pIemCpu The IEM state of the current CPU.
277 */
278#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
279
280/**
281 * Check if we're currently executing in real mode.
282 *
283 * @returns @c true if it is, @c false if not.
284 * @param a_pIemCpu The IEM state of the current CPU.
285 */
286#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
287
288/**
289 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
290 * @returns PCCPUMFEATURES
291 * @param a_pIemCpu The IEM state of the current CPU.
292 */
293#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
294
295/**
296 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
297 * @returns PCCPUMFEATURES
298 * @param a_pIemCpu The IEM state of the current CPU.
299 */
300#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
301
302/**
303 * Evaluates to true if we're presenting an Intel CPU to the guest.
304 */
305#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
306
307/**
308 * Evaluates to true if we're presenting an AMD CPU to the guest.
309 */
310#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
311
312/**
313 * Check if the address is canonical.
314 */
315#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
316
317
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
322
323
324/** Function table for the ADD instruction. */
325static const IEMOPBINSIZES g_iemAImpl_add =
326{
327 iemAImpl_add_u8, iemAImpl_add_u8_locked,
328 iemAImpl_add_u16, iemAImpl_add_u16_locked,
329 iemAImpl_add_u32, iemAImpl_add_u32_locked,
330 iemAImpl_add_u64, iemAImpl_add_u64_locked
331};
332
333/** Function table for the ADC instruction. */
334static const IEMOPBINSIZES g_iemAImpl_adc =
335{
336 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
337 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
338 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
339 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
340};
341
342/** Function table for the SUB instruction. */
343static const IEMOPBINSIZES g_iemAImpl_sub =
344{
345 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
346 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
347 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
348 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
349};
350
351/** Function table for the SBB instruction. */
352static const IEMOPBINSIZES g_iemAImpl_sbb =
353{
354 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
355 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
356 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
357 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
358};
359
360/** Function table for the OR instruction. */
361static const IEMOPBINSIZES g_iemAImpl_or =
362{
363 iemAImpl_or_u8, iemAImpl_or_u8_locked,
364 iemAImpl_or_u16, iemAImpl_or_u16_locked,
365 iemAImpl_or_u32, iemAImpl_or_u32_locked,
366 iemAImpl_or_u64, iemAImpl_or_u64_locked
367};
368
369/** Function table for the XOR instruction. */
370static const IEMOPBINSIZES g_iemAImpl_xor =
371{
372 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
373 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
374 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
375 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
376};
377
378/** Function table for the AND instruction. */
379static const IEMOPBINSIZES g_iemAImpl_and =
380{
381 iemAImpl_and_u8, iemAImpl_and_u8_locked,
382 iemAImpl_and_u16, iemAImpl_and_u16_locked,
383 iemAImpl_and_u32, iemAImpl_and_u32_locked,
384 iemAImpl_and_u64, iemAImpl_and_u64_locked
385};
386
387/** Function table for the CMP instruction.
388 * @remarks Making operand order ASSUMPTIONS.
389 */
390static const IEMOPBINSIZES g_iemAImpl_cmp =
391{
392 iemAImpl_cmp_u8, NULL,
393 iemAImpl_cmp_u16, NULL,
394 iemAImpl_cmp_u32, NULL,
395 iemAImpl_cmp_u64, NULL
396};
397
398/** Function table for the TEST instruction.
399 * @remarks Making operand order ASSUMPTIONS.
400 */
401static const IEMOPBINSIZES g_iemAImpl_test =
402{
403 iemAImpl_test_u8, NULL,
404 iemAImpl_test_u16, NULL,
405 iemAImpl_test_u32, NULL,
406 iemAImpl_test_u64, NULL
407};
408
409/** Function table for the BT instruction. */
410static const IEMOPBINSIZES g_iemAImpl_bt =
411{
412 NULL, NULL,
413 iemAImpl_bt_u16, NULL,
414 iemAImpl_bt_u32, NULL,
415 iemAImpl_bt_u64, NULL
416};
417
418/** Function table for the BTC instruction. */
419static const IEMOPBINSIZES g_iemAImpl_btc =
420{
421 NULL, NULL,
422 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
423 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
424 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
425};
426
427/** Function table for the BTR instruction. */
428static const IEMOPBINSIZES g_iemAImpl_btr =
429{
430 NULL, NULL,
431 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
432 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
433 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
434};
435
436/** Function table for the BTS instruction. */
437static const IEMOPBINSIZES g_iemAImpl_bts =
438{
439 NULL, NULL,
440 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
441 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
442 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
443};
444
445/** Function table for the BSF instruction. */
446static const IEMOPBINSIZES g_iemAImpl_bsf =
447{
448 NULL, NULL,
449 iemAImpl_bsf_u16, NULL,
450 iemAImpl_bsf_u32, NULL,
451 iemAImpl_bsf_u64, NULL
452};
453
454/** Function table for the BSR instruction. */
455static const IEMOPBINSIZES g_iemAImpl_bsr =
456{
457 NULL, NULL,
458 iemAImpl_bsr_u16, NULL,
459 iemAImpl_bsr_u32, NULL,
460 iemAImpl_bsr_u64, NULL
461};
462
463/** Function table for the IMUL instruction. */
464static const IEMOPBINSIZES g_iemAImpl_imul_two =
465{
466 NULL, NULL,
467 iemAImpl_imul_two_u16, NULL,
468 iemAImpl_imul_two_u32, NULL,
469 iemAImpl_imul_two_u64, NULL
470};
471
472/** Group 1 /r lookup table. */
473static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
474{
475 &g_iemAImpl_add,
476 &g_iemAImpl_or,
477 &g_iemAImpl_adc,
478 &g_iemAImpl_sbb,
479 &g_iemAImpl_and,
480 &g_iemAImpl_sub,
481 &g_iemAImpl_xor,
482 &g_iemAImpl_cmp
483};
484
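/* Editor's illustrative sketch (assumption): the table above is indexed with
   the reg (/r) field of the ModR/M byte, so index 0 selects ADD, 1 OR, 2 ADC,
   3 SBB, 4 AND, 5 SUB, 6 XOR and 7 CMP. */
#if 0 /* example only */
PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
#endif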
485/** Function table for the INC instruction. */
486static const IEMOPUNARYSIZES g_iemAImpl_inc =
487{
488 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
489 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
490 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
491 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
492};
493
494/** Function table for the DEC instruction. */
495static const IEMOPUNARYSIZES g_iemAImpl_dec =
496{
497 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
498 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
499 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
500 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
501};
502
503/** Function table for the NEG instruction. */
504static const IEMOPUNARYSIZES g_iemAImpl_neg =
505{
506 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
507 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
508 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
509 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
510};
511
512/** Function table for the NOT instruction. */
513static const IEMOPUNARYSIZES g_iemAImpl_not =
514{
515 iemAImpl_not_u8, iemAImpl_not_u8_locked,
516 iemAImpl_not_u16, iemAImpl_not_u16_locked,
517 iemAImpl_not_u32, iemAImpl_not_u32_locked,
518 iemAImpl_not_u64, iemAImpl_not_u64_locked
519};
520
521
522/** Function table for the ROL instruction. */
523static const IEMOPSHIFTSIZES g_iemAImpl_rol =
524{
525 iemAImpl_rol_u8,
526 iemAImpl_rol_u16,
527 iemAImpl_rol_u32,
528 iemAImpl_rol_u64
529};
530
531/** Function table for the ROR instruction. */
532static const IEMOPSHIFTSIZES g_iemAImpl_ror =
533{
534 iemAImpl_ror_u8,
535 iemAImpl_ror_u16,
536 iemAImpl_ror_u32,
537 iemAImpl_ror_u64
538};
539
540/** Function table for the RCL instruction. */
541static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
542{
543 iemAImpl_rcl_u8,
544 iemAImpl_rcl_u16,
545 iemAImpl_rcl_u32,
546 iemAImpl_rcl_u64
547};
548
549/** Function table for the RCR instruction. */
550static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
551{
552 iemAImpl_rcr_u8,
553 iemAImpl_rcr_u16,
554 iemAImpl_rcr_u32,
555 iemAImpl_rcr_u64
556};
557
558/** Function table for the SHL instruction. */
559static const IEMOPSHIFTSIZES g_iemAImpl_shl =
560{
561 iemAImpl_shl_u8,
562 iemAImpl_shl_u16,
563 iemAImpl_shl_u32,
564 iemAImpl_shl_u64
565};
566
567/** Function table for the SHR instruction. */
568static const IEMOPSHIFTSIZES g_iemAImpl_shr =
569{
570 iemAImpl_shr_u8,
571 iemAImpl_shr_u16,
572 iemAImpl_shr_u32,
573 iemAImpl_shr_u64
574};
575
576/** Function table for the SAR instruction. */
577static const IEMOPSHIFTSIZES g_iemAImpl_sar =
578{
579 iemAImpl_sar_u8,
580 iemAImpl_sar_u16,
581 iemAImpl_sar_u32,
582 iemAImpl_sar_u64
583};
584
585
586/** Function table for the MUL instruction. */
587static const IEMOPMULDIVSIZES g_iemAImpl_mul =
588{
589 iemAImpl_mul_u8,
590 iemAImpl_mul_u16,
591 iemAImpl_mul_u32,
592 iemAImpl_mul_u64
593};
594
595/** Function table for the IMUL instruction working implicitly on rAX. */
596static const IEMOPMULDIVSIZES g_iemAImpl_imul =
597{
598 iemAImpl_imul_u8,
599 iemAImpl_imul_u16,
600 iemAImpl_imul_u32,
601 iemAImpl_imul_u64
602};
603
604/** Function table for the DIV instruction. */
605static const IEMOPMULDIVSIZES g_iemAImpl_div =
606{
607 iemAImpl_div_u8,
608 iemAImpl_div_u16,
609 iemAImpl_div_u32,
610 iemAImpl_div_u64
611};
612
613/** Function table for the IDIV instruction. */
614static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
615{
616 iemAImpl_idiv_u8,
617 iemAImpl_idiv_u16,
618 iemAImpl_idiv_u32,
619 iemAImpl_idiv_u64
620};
621
622/** Function table for the SHLD instruction */
623static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
624{
625 iemAImpl_shld_u16,
626 iemAImpl_shld_u32,
627 iemAImpl_shld_u64,
628};
629
630/** Function table for the SHRD instruction */
631static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
632{
633 iemAImpl_shrd_u16,
634 iemAImpl_shrd_u32,
635 iemAImpl_shrd_u64,
636};
637
638
639/** Function table for the PUNPCKLBW instruction */
640static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
641/** Function table for the PUNPCKLWD instruction */
642static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
643/** Function table for the PUNPCKLDQ instruction */
644static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
645/** Function table for the PUNPCKLQDQ instruction */
646static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
647
648/** Function table for the PUNPCKHBW instruction */
649static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
650/** Function table for the PUNPCKHWD instruction */
651static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
652/** Function table for the PUNPCKHDQ instruction */
653static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
654/** Function table for the PUNPCKHQDQ instruction */
655static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
656
657/** Function table for the PXOR instruction */
658static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
659/** Function table for the PCMPEQB instruction */
660static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
661/** Function table for the PCMPEQW instruction */
662static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
663/** Function table for the PCMPEQD instruction */
664static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
665
666
667#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
668/** What IEM just wrote. */
669uint8_t g_abIemWrote[256];
670/** How much IEM just wrote. */
671size_t g_cbIemWrote;
672#endif
673
674
675/*******************************************************************************
676* Internal Functions *
677*******************************************************************************/
678static VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
679static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
680static VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
681static VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
682/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
683static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
684static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
685static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686static VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
687static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
688static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
689static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
690static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
691static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
692static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
693static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
694static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
695static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
696static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
697static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
698static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
699static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
700static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
701static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
704static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
705static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
706static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
707static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
708static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
709static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
710static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
711
712#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
713static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
714#endif
715static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
716static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
717
718
719
720/**
721 * Sets the pass up status.
722 *
723 * @returns VINF_SUCCESS.
724 * @param pIemCpu The per CPU IEM state of the calling thread.
725 * @param rcPassUp The pass up status. Must be informational.
726 * VINF_SUCCESS is not allowed.
727 */
728static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
729{
730 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
731
732 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
733 if (rcOldPassUp == VINF_SUCCESS)
734 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
735 /* If both are EM scheduling codes, use EM priority rules. */
736 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
737 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
738 {
739 if (rcPassUp < rcOldPassUp)
740 {
741 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
742 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
743 }
744 else
745 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 }
747 /* Override EM scheduling with specific status code. */
748 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
749 {
750 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
751 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
752 }
753 /* Don't override specific status code, first come first served. */
754 else
755 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
756 return VINF_SUCCESS;
757}
758
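/* Editor's illustrative sketch (assumption): how a caller typically folds an
   informational status into the pass-up status while reporting plain success
   to the decoder loop; the variable names are made up for the example. */
#if 0 /* example only */
if (rcStrict != VINF_SUCCESS && RT_SUCCESS(rcStrict))
    rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict); /* always returns VINF_SUCCESS */
#endif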
759
760/**
761 * Initializes the execution state.
762 *
763 * @param pIemCpu The per CPU IEM state.
764 * @param fBypassHandlers Whether to bypass access handlers.
765 */
766DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
767{
768 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
769 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
770
771#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
772 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
773 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
774 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
776 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
778 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
780#endif
781
782#ifdef VBOX_WITH_RAW_MODE_NOT_R0
783 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
784#endif
785 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
786 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
787 ? IEMMODE_64BIT
788 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
789 ? IEMMODE_32BIT
790 : IEMMODE_16BIT;
791 pIemCpu->enmCpuMode = enmMode;
792#ifdef VBOX_STRICT
793 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
794 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
795 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
796 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
797 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
798 pIemCpu->uRexReg = 127;
799 pIemCpu->uRexB = 127;
800 pIemCpu->uRexIndex = 127;
801 pIemCpu->iEffSeg = 127;
802 pIemCpu->offOpcode = 127;
803 pIemCpu->cbOpcode = 127;
804#endif
805
806 pIemCpu->cActiveMappings = 0;
807 pIemCpu->iNextMapping = 0;
808 pIemCpu->rcPassUp = VINF_SUCCESS;
809 pIemCpu->fBypassHandlers = fBypassHandlers;
810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
811 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
812 && pCtx->cs.u64Base == 0
813 && pCtx->cs.u32Limit == UINT32_MAX
814 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
815 if (!pIemCpu->fInPatchCode)
816 CPUMRawLeave(pVCpu, VINF_SUCCESS);
817#endif
818}
819
820
821/**
822 * Initializes the decoder state.
823 *
824 * @param pIemCpu The per CPU IEM state.
825 * @param fBypassHandlers Whether to bypass access handlers.
826 */
827DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
828{
829 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
830 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
831
832#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
833 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
834 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
835 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
836 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
837 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
839 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
840 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
841#endif
842
843#ifdef VBOX_WITH_RAW_MODE_NOT_R0
844 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
845#endif
846 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
847#ifdef IEM_VERIFICATION_MODE_FULL
848 if (pIemCpu->uInjectCpl != UINT8_MAX)
849 pIemCpu->uCpl = pIemCpu->uInjectCpl;
850#endif
851 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
852 ? IEMMODE_64BIT
853 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
854 ? IEMMODE_32BIT
855 : IEMMODE_16BIT;
856 pIemCpu->enmCpuMode = enmMode;
857 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
858 pIemCpu->enmEffAddrMode = enmMode;
859 if (enmMode != IEMMODE_64BIT)
860 {
861 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
862 pIemCpu->enmEffOpSize = enmMode;
863 }
864 else
865 {
866 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
867 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
868 }
869 pIemCpu->fPrefixes = 0;
870 pIemCpu->uRexReg = 0;
871 pIemCpu->uRexB = 0;
872 pIemCpu->uRexIndex = 0;
873 pIemCpu->iEffSeg = X86_SREG_DS;
874 pIemCpu->offOpcode = 0;
875 pIemCpu->cbOpcode = 0;
876 pIemCpu->cActiveMappings = 0;
877 pIemCpu->iNextMapping = 0;
878 pIemCpu->rcPassUp = VINF_SUCCESS;
879 pIemCpu->fBypassHandlers = fBypassHandlers;
880#ifdef VBOX_WITH_RAW_MODE_NOT_R0
881 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
882 && pCtx->cs.u64Base == 0
883 && pCtx->cs.u32Limit == UINT32_MAX
884 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
885 if (!pIemCpu->fInPatchCode)
886 CPUMRawLeave(pVCpu, VINF_SUCCESS);
887#endif
888
889#ifdef DBGFTRACE_ENABLED
890 switch (enmMode)
891 {
892 case IEMMODE_64BIT:
893 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
894 break;
895 case IEMMODE_32BIT:
896 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
897 break;
898 case IEMMODE_16BIT:
899 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
900 break;
901 }
902#endif
903}
904
905
906/**
907 * Prefetches opcodes the first time execution is started.
908 *
909 * @returns Strict VBox status code.
910 * @param pIemCpu The IEM state.
911 * @param fBypassHandlers Whether to bypass access handlers.
912 */
913static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
914{
915#ifdef IEM_VERIFICATION_MODE_FULL
916 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
917#endif
918 iemInitDecoder(pIemCpu, fBypassHandlers);
919
920 /*
921 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
922 *
923 * First translate CS:rIP to a physical address.
924 */
925 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
926 uint32_t cbToTryRead;
927 RTGCPTR GCPtrPC;
928 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
929 {
930 cbToTryRead = PAGE_SIZE;
931 GCPtrPC = pCtx->rip;
932 if (!IEM_IS_CANONICAL(GCPtrPC))
933 return iemRaiseGeneralProtectionFault0(pIemCpu);
934 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
935 }
936 else
937 {
938 uint32_t GCPtrPC32 = pCtx->eip;
939 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
940 if (GCPtrPC32 > pCtx->cs.u32Limit)
941 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
942 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
943 if (!cbToTryRead) /* overflowed */
944 {
945 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
946 cbToTryRead = UINT32_MAX;
947 }
948 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
949 Assert(GCPtrPC <= UINT32_MAX);
950 }
951
952#ifdef VBOX_WITH_RAW_MODE_NOT_R0
953 /* Allow interpretation of patch manager code blocks since they can for
954 instance throw #PFs for perfectly good reasons. */
955 if (pIemCpu->fInPatchCode)
956 {
957 size_t cbRead = 0;
958 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
959 AssertRCReturn(rc, rc);
960 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
961 return VINF_SUCCESS;
962 }
963#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
964
965 RTGCPHYS GCPhys;
966 uint64_t fFlags;
967 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
968 if (RT_FAILURE(rc))
969 {
970 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
971 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
972 }
973 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
974 {
975 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
976 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
977 }
978 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
979 {
980 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
981 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
982 }
983 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
984 /** @todo Check reserved bits and such stuff. PGM is better at doing
985 * that, so do it when implementing the guest virtual address
986 * TLB... */
987
988#ifdef IEM_VERIFICATION_MODE_FULL
989 /*
990 * Optimistic optimization: Use unconsumed opcode bytes from the previous
991 * instruction.
992 */
993 /** @todo optimize this differently by not using PGMPhysRead. */
994 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
995 pIemCpu->GCPhysOpcodes = GCPhys;
996 if ( offPrevOpcodes < cbOldOpcodes
997 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
998 {
999 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1000 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1001 pIemCpu->cbOpcode = cbNew;
1002 return VINF_SUCCESS;
1003 }
1004#endif
1005
1006 /*
1007 * Read the bytes at this address.
1008 */
1009 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1010#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1011 size_t cbActual;
1012 if ( PATMIsEnabled(pVM)
1013 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1014 {
1015 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1016 Assert(cbActual > 0);
1017 pIemCpu->cbOpcode = (uint8_t)cbActual;
1018 }
1019 else
1020#endif
1021 {
1022 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1023 if (cbToTryRead > cbLeftOnPage)
1024 cbToTryRead = cbLeftOnPage;
1025 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1026 cbToTryRead = sizeof(pIemCpu->abOpcode);
1027
1028 if (!pIemCpu->fBypassHandlers)
1029 rc = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1030 else
1031 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1032 if (rc != VINF_SUCCESS)
1033 {
1034 /** @todo status code handling */
1035 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1036 GCPtrPC, GCPhys, rc, cbToTryRead));
1037 return rc;
1038 }
1039 pIemCpu->cbOpcode = cbToTryRead;
1040 }
1041
1042 return VINF_SUCCESS;
1043}
1044
1045
1046/**
1047 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1048 * exception if it fails.
1049 *
1050 * @returns Strict VBox status code.
1051 * @param pIemCpu The IEM state.
1052 * @param cbMin The minimum number of bytes relative to offOpcode
1053 * that must be read.
1054 */
1055static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1056{
1057 /*
1058 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1059 *
1060 * First translate CS:rIP to a physical address.
1061 */
1062 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1063 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1064 uint32_t cbToTryRead;
1065 RTGCPTR GCPtrNext;
1066 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1067 {
1068 cbToTryRead = PAGE_SIZE;
1069 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1070 if (!IEM_IS_CANONICAL(GCPtrNext))
1071 return iemRaiseGeneralProtectionFault0(pIemCpu);
1072 }
1073 else
1074 {
1075 uint32_t GCPtrNext32 = pCtx->eip;
1076 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1077 GCPtrNext32 += pIemCpu->cbOpcode;
1078 if (GCPtrNext32 > pCtx->cs.u32Limit)
1079 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1080 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1081 if (!cbToTryRead) /* overflowed */
1082 {
1083 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1084 cbToTryRead = UINT32_MAX;
1085 /** @todo check out wrapping around the code segment. */
1086 }
1087 if (cbToTryRead < cbMin - cbLeft)
1088 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1089 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1090 }
1091
1092 /* Only read up to the end of the page, and make sure we don't read more
1093 than the opcode buffer can hold. */
1094 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1095 if (cbToTryRead > cbLeftOnPage)
1096 cbToTryRead = cbLeftOnPage;
1097 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1098 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1099/** @todo r=bird: Convert assertion into undefined opcode exception? */
1100 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1101
1102#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1103 /* Allow interpretation of patch manager code blocks since they can for
1104 instance throw #PFs for perfectly good reasons. */
1105 if (pIemCpu->fInPatchCode)
1106 {
1107 size_t cbRead = 0;
1108 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1109 AssertRCReturn(rc, rc);
1110 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1111 return VINF_SUCCESS;
1112 }
1113#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1114
1115 RTGCPHYS GCPhys;
1116 uint64_t fFlags;
1117 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1118 if (RT_FAILURE(rc))
1119 {
1120 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1121 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1122 }
1123 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1124 {
1125 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1126 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1127 }
1128 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1129 {
1130 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1131 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1132 }
1133 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1134 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1135 /** @todo Check reserved bits and such stuff. PGM is better at doing
1136 * that, so do it when implementing the guest virtual address
1137 * TLB... */
1138
1139 /*
1140 * Read the bytes at this address.
1141 *
1142 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1143 * and since PATM should only patch the start of an instruction there
1144 * should be no need to check again here.
1145 */
1146 if (!pIemCpu->fBypassHandlers)
1147 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead, PGMACCESSORIGIN_IEM);
1148 else
1149 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1150 if (rc != VINF_SUCCESS)
1151 {
1152 /** @todo status code handling */
1153 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1154 return rc;
1155 }
1156 pIemCpu->cbOpcode += cbToTryRead;
1157 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1158
1159 return VINF_SUCCESS;
1160}
1161
1162
1163/**
1164 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1165 *
1166 * @returns Strict VBox status code.
1167 * @param pIemCpu The IEM state.
1168 * @param pb Where to return the opcode byte.
1169 */
1170DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1171{
1172 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1173 if (rcStrict == VINF_SUCCESS)
1174 {
1175 uint8_t offOpcode = pIemCpu->offOpcode;
1176 *pb = pIemCpu->abOpcode[offOpcode];
1177 pIemCpu->offOpcode = offOpcode + 1;
1178 }
1179 else
1180 *pb = 0;
1181 return rcStrict;
1182}
1183
1184
1185/**
1186 * Fetches the next opcode byte.
1187 *
1188 * @returns Strict VBox status code.
1189 * @param pIemCpu The IEM state.
1190 * @param pu8 Where to return the opcode byte.
1191 */
1192DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1193{
1194 uint8_t const offOpcode = pIemCpu->offOpcode;
1195 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1196 {
1197 *pu8 = pIemCpu->abOpcode[offOpcode];
1198 pIemCpu->offOpcode = offOpcode + 1;
1199 return VINF_SUCCESS;
1200 }
1201 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1202}
1203
1204
1205/**
1206 * Fetches the next opcode byte, returns automatically on failure.
1207 *
1208 * @param a_pu8 Where to return the opcode byte.
1209 * @remark Implicitly references pIemCpu.
1210 */
1211#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1212 do \
1213 { \
1214 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1215 if (rcStrict2 != VINF_SUCCESS) \
1216 return rcStrict2; \
1217 } while (0)
1218
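/* Editor's illustrative sketch (assumption): fetching a ModR/M byte inside an
   opcode decoder; on any fetch failure the macro makes the decoder itself
   return the strict status code to its caller. The function name is made up. */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_with_modrm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);
    NOREF(bRm); /* a real decoder would dispatch on the mod/reg/rm fields here */
    return VINF_SUCCESS;
}
#endif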
1219
1220/**
1221 * Fetches the next signed byte from the opcode stream.
1222 *
1223 * @returns Strict VBox status code.
1224 * @param pIemCpu The IEM state.
1225 * @param pi8 Where to return the signed byte.
1226 */
1227DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1228{
1229 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1230}
1231
1232
1233/**
1234 * Fetches the next signed byte from the opcode stream, returning automatically
1235 * on failure.
1236 *
1237 * @param a_pi8 Where to return the signed byte.
1238 * @remark Implicitly references pIemCpu.
1239 */
1240#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1241 do \
1242 { \
1243 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1244 if (rcStrict2 != VINF_SUCCESS) \
1245 return rcStrict2; \
1246 } while (0)
1247
1248
1249/**
1250 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1251 *
1252 * @returns Strict VBox status code.
1253 * @param pIemCpu The IEM state.
1254 * @param pu16 Where to return the opcode word.
1255 */
1256DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1257{
1258 uint8_t u8;
1259 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1260 if (rcStrict == VINF_SUCCESS)
1261 *pu16 = (int8_t)u8;
1262 return rcStrict;
1263}
1264
1265
1266/**
1267 * Fetches the next signed byte from the opcode stream, extending it to
1268 * unsigned 16-bit.
1269 *
1270 * @returns Strict VBox status code.
1271 * @param pIemCpu The IEM state.
1272 * @param pu16 Where to return the unsigned word.
1273 */
1274DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1275{
1276 uint8_t const offOpcode = pIemCpu->offOpcode;
1277 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1278 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1279
1280 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1281 pIemCpu->offOpcode = offOpcode + 1;
1282 return VINF_SUCCESS;
1283}
1284
1285
1286/**
1287 * Fetches the next signed byte from the opcode stream, sign-extending it to
1288 * a word, returning automatically on failure.
1289 *
1290 * @param a_pu16 Where to return the word.
1291 * @remark Implicitly references pIemCpu.
1292 */
1293#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1294 do \
1295 { \
1296 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1297 if (rcStrict2 != VINF_SUCCESS) \
1298 return rcStrict2; \
1299 } while (0)
1300
1301
1302/**
1303 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1304 *
1305 * @returns Strict VBox status code.
1306 * @param pIemCpu The IEM state.
1307 * @param pu32 Where to return the opcode dword.
1308 */
1309DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1310{
1311 uint8_t u8;
1312 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1313 if (rcStrict == VINF_SUCCESS)
1314 *pu32 = (int8_t)u8;
1315 return rcStrict;
1316}
1317
1318
1319/**
1320 * Fetches the next signed byte from the opcode stream, extending it to
1321 * unsigned 32-bit.
1322 *
1323 * @returns Strict VBox status code.
1324 * @param pIemCpu The IEM state.
1325 * @param pu32 Where to return the unsigned dword.
1326 */
1327DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1328{
1329 uint8_t const offOpcode = pIemCpu->offOpcode;
1330 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1331 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1332
1333 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1334 pIemCpu->offOpcode = offOpcode + 1;
1335 return VINF_SUCCESS;
1336}
1337
1338
1339/**
1340 * Fetches the next signed byte from the opcode stream, sign-extending it to
1341 * a double word, returning automatically on failure.
1342 *
1343 * @param a_pu32 Where to return the double word.
1344 * @remark Implicitly references pIemCpu.
1345 */
1346#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1347 do \
1348 { \
1349 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1350 if (rcStrict2 != VINF_SUCCESS) \
1351 return rcStrict2; \
1352 } while (0)
1353
1354
1355/**
1356 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1357 *
1358 * @returns Strict VBox status code.
1359 * @param pIemCpu The IEM state.
1360 * @param pu64 Where to return the opcode qword.
1361 */
1362DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1363{
1364 uint8_t u8;
1365 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1366 if (rcStrict == VINF_SUCCESS)
1367 *pu64 = (int8_t)u8;
1368 return rcStrict;
1369}
1370
1371
1372/**
1373 * Fetches the next signed byte from the opcode stream, extending it to
1374 * unsigned 64-bit.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pIemCpu The IEM state.
1378 * @param pu64 Where to return the unsigned qword.
1379 */
1380DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1381{
1382 uint8_t const offOpcode = pIemCpu->offOpcode;
1383 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1384 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1385
1386 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1387 pIemCpu->offOpcode = offOpcode + 1;
1388 return VINF_SUCCESS;
1389}
1390
1391
1392/**
1393 * Fetches the next signed byte from the opcode stream, sign-extending it to
1394 * a quad word, returning automatically on failure.
1395 *
1396 * @param a_pu64 Where to return the quad word.
1397 * @remark Implicitly references pIemCpu.
1398 */
1399#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1400 do \
1401 { \
1402 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1403 if (rcStrict2 != VINF_SUCCESS) \
1404 return rcStrict2; \
1405 } while (0)
1406
1407
1408/**
1409 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1410 *
1411 * @returns Strict VBox status code.
1412 * @param pIemCpu The IEM state.
1413 * @param pu16 Where to return the opcode word.
1414 */
1415DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1416{
1417 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1418 if (rcStrict == VINF_SUCCESS)
1419 {
1420 uint8_t offOpcode = pIemCpu->offOpcode;
1421 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1422 pIemCpu->offOpcode = offOpcode + 2;
1423 }
1424 else
1425 *pu16 = 0;
1426 return rcStrict;
1427}
1428
1429
1430/**
1431 * Fetches the next opcode word.
1432 *
1433 * @returns Strict VBox status code.
1434 * @param pIemCpu The IEM state.
1435 * @param pu16 Where to return the opcode word.
1436 */
1437DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1438{
1439 uint8_t const offOpcode = pIemCpu->offOpcode;
1440 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1441 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1442
1443 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1444 pIemCpu->offOpcode = offOpcode + 2;
1445 return VINF_SUCCESS;
1446}
1447
1448
1449/**
1450 * Fetches the next opcode word, returns automatically on failure.
1451 *
1452 * @param a_pu16 Where to return the opcode word.
1453 * @remark Implicitly references pIemCpu.
1454 */
1455#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1456 do \
1457 { \
1458 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1459 if (rcStrict2 != VINF_SUCCESS) \
1460 return rcStrict2; \
1461 } while (0)
1462
1463
1464/**
1465 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1466 *
1467 * @returns Strict VBox status code.
1468 * @param pIemCpu The IEM state.
1469 * @param pu32 Where to return the opcode double word.
1470 */
1471DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1472{
1473 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1474 if (rcStrict == VINF_SUCCESS)
1475 {
1476 uint8_t offOpcode = pIemCpu->offOpcode;
1477 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1478 pIemCpu->offOpcode = offOpcode + 2;
1479 }
1480 else
1481 *pu32 = 0;
1482 return rcStrict;
1483}
1484
1485
1486/**
1487 * Fetches the next opcode word, zero extending it to a double word.
1488 *
1489 * @returns Strict VBox status code.
1490 * @param pIemCpu The IEM state.
1491 * @param pu32 Where to return the opcode double word.
1492 */
1493DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1494{
1495 uint8_t const offOpcode = pIemCpu->offOpcode;
1496 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1497 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1498
1499 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1500 pIemCpu->offOpcode = offOpcode + 2;
1501 return VINF_SUCCESS;
1502}
1503
1504
1505/**
1506 * Fetches the next opcode word and zero extends it to a double word, returns
1507 * automatically on failure.
1508 *
1509 * @param a_pu32 Where to return the opcode double word.
1510 * @remark Implicitly references pIemCpu.
1511 */
1512#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1513 do \
1514 { \
1515 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1516 if (rcStrict2 != VINF_SUCCESS) \
1517 return rcStrict2; \
1518 } while (0)
1519
1520
1521/**
1522 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1523 *
1524 * @returns Strict VBox status code.
1525 * @param pIemCpu The IEM state.
1526 * @param pu64 Where to return the opcode quad word.
1527 */
1528DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1529{
1530 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1531 if (rcStrict == VINF_SUCCESS)
1532 {
1533 uint8_t offOpcode = pIemCpu->offOpcode;
1534 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1535 pIemCpu->offOpcode = offOpcode + 2;
1536 }
1537 else
1538 *pu64 = 0;
1539 return rcStrict;
1540}
1541
1542
1543/**
1544 * Fetches the next opcode word, zero extending it to a quad word.
1545 *
1546 * @returns Strict VBox status code.
1547 * @param pIemCpu The IEM state.
1548 * @param pu64 Where to return the opcode quad word.
1549 */
1550DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1551{
1552 uint8_t const offOpcode = pIemCpu->offOpcode;
1553 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1554 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1555
1556 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1557 pIemCpu->offOpcode = offOpcode + 2;
1558 return VINF_SUCCESS;
1559}
1560
1561
1562/**
1563 * Fetches the next opcode word and zero extends it to a quad word, returns
1564 * automatically on failure.
1565 *
1566 * @param a_pu64 Where to return the opcode quad word.
1567 * @remark Implicitly references pIemCpu.
1568 */
1569#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1570 do \
1571 { \
1572 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1573 if (rcStrict2 != VINF_SUCCESS) \
1574 return rcStrict2; \
1575 } while (0)
1576
1577
1578/**
1579 * Fetches the next signed word from the opcode stream.
1580 *
1581 * @returns Strict VBox status code.
1582 * @param pIemCpu The IEM state.
1583 * @param pi16 Where to return the signed word.
1584 */
1585DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1586{
1587 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1588}
1589
1590
1591/**
1592 * Fetches the next signed word from the opcode stream, returning automatically
1593 * on failure.
1594 *
1595 * @param a_pi16 Where to return the signed word.
1596 * @remark Implicitly references pIemCpu.
1597 */
1598#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1599 do \
1600 { \
1601 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1602 if (rcStrict2 != VINF_SUCCESS) \
1603 return rcStrict2; \
1604 } while (0)
1605
1606
1607/**
1608 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1609 *
1610 * @returns Strict VBox status code.
1611 * @param pIemCpu The IEM state.
1612 * @param pu32 Where to return the opcode dword.
1613 */
1614DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1615{
1616 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1617 if (rcStrict == VINF_SUCCESS)
1618 {
1619 uint8_t offOpcode = pIemCpu->offOpcode;
1620 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1621 pIemCpu->abOpcode[offOpcode + 1],
1622 pIemCpu->abOpcode[offOpcode + 2],
1623 pIemCpu->abOpcode[offOpcode + 3]);
1624 pIemCpu->offOpcode = offOpcode + 4;
1625 }
1626 else
1627 *pu32 = 0;
1628 return rcStrict;
1629}
1630
1631
1632/**
1633 * Fetches the next opcode dword.
1634 *
1635 * @returns Strict VBox status code.
1636 * @param pIemCpu The IEM state.
1637 * @param pu32 Where to return the opcode double word.
1638 */
1639DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1640{
1641 uint8_t const offOpcode = pIemCpu->offOpcode;
1642 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1643 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1644
1645 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1646 pIemCpu->abOpcode[offOpcode + 1],
1647 pIemCpu->abOpcode[offOpcode + 2],
1648 pIemCpu->abOpcode[offOpcode + 3]);
1649 pIemCpu->offOpcode = offOpcode + 4;
1650 return VINF_SUCCESS;
1651}
1652
1653
1654/**
1655 * Fetches the next opcode dword, returns automatically on failure.
1656 *
1657 * @param a_pu32 Where to return the opcode dword.
1658 * @remark Implicitly references pIemCpu.
1659 */
1660#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1661 do \
1662 { \
1663 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1664 if (rcStrict2 != VINF_SUCCESS) \
1665 return rcStrict2; \
1666 } while (0)
1667
1668
1669/**
1670 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1671 *
1672 * @returns Strict VBox status code.
1673 * @param pIemCpu The IEM state.
1674 * @param pu64 Where to return the opcode quad word.
1675 */
1676DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1677{
1678 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1679 if (rcStrict == VINF_SUCCESS)
1680 {
1681 uint8_t offOpcode = pIemCpu->offOpcode;
1682 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1683 pIemCpu->abOpcode[offOpcode + 1],
1684 pIemCpu->abOpcode[offOpcode + 2],
1685 pIemCpu->abOpcode[offOpcode + 3]);
1686 pIemCpu->offOpcode = offOpcode + 4;
1687 }
1688 else
1689 *pu64 = 0;
1690 return rcStrict;
1691}
1692
1693
1694/**
1695 * Fetches the next opcode dword, zero extending it to a quad word.
1696 *
1697 * @returns Strict VBox status code.
1698 * @param pIemCpu The IEM state.
1699 * @param pu64 Where to return the opcode quad word.
1700 */
1701DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1702{
1703 uint8_t const offOpcode = pIemCpu->offOpcode;
1704 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1705 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1706
1707 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1708 pIemCpu->abOpcode[offOpcode + 1],
1709 pIemCpu->abOpcode[offOpcode + 2],
1710 pIemCpu->abOpcode[offOpcode + 3]);
1711 pIemCpu->offOpcode = offOpcode + 4;
1712 return VINF_SUCCESS;
1713}
1714
1715
1716/**
1717 * Fetches the next opcode dword and zero extends it to a quad word, returns
1718 * automatically on failure.
1719 *
1720 * @param a_pu64 Where to return the opcode quad word.
1721 * @remark Implicitly references pIemCpu.
1722 */
1723#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1724 do \
1725 { \
1726 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1727 if (rcStrict2 != VINF_SUCCESS) \
1728 return rcStrict2; \
1729 } while (0)
1730
1731
1732/**
1733 * Fetches the next signed double word from the opcode stream.
1734 *
1735 * @returns Strict VBox status code.
1736 * @param pIemCpu The IEM state.
1737 * @param pi32 Where to return the signed double word.
1738 */
1739DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1740{
1741 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1742}
1743
1744/**
1745 * Fetches the next signed double word from the opcode stream, returning
1746 * automatically on failure.
1747 *
1748 * @param a_pi32 Where to return the signed double word.
1749 * @remark Implicitly references pIemCpu.
1750 */
1751#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1752 do \
1753 { \
1754 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1755 if (rcStrict2 != VINF_SUCCESS) \
1756 return rcStrict2; \
1757 } while (0)
1758
1759
1760/**
1761 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1762 *
1763 * @returns Strict VBox status code.
1764 * @param pIemCpu The IEM state.
1765 * @param pu64 Where to return the opcode qword.
1766 */
1767DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1768{
1769 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1770 if (rcStrict == VINF_SUCCESS)
1771 {
1772 uint8_t offOpcode = pIemCpu->offOpcode;
1773 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1774 pIemCpu->abOpcode[offOpcode + 1],
1775 pIemCpu->abOpcode[offOpcode + 2],
1776 pIemCpu->abOpcode[offOpcode + 3]);
1777 pIemCpu->offOpcode = offOpcode + 4;
1778 }
1779 else
1780 *pu64 = 0;
1781 return rcStrict;
1782}
1783
1784
1785/**
1786 * Fetches the next opcode dword, sign extending it into a quad word.
1787 *
1788 * @returns Strict VBox status code.
1789 * @param pIemCpu The IEM state.
1790 * @param pu64 Where to return the opcode quad word.
1791 */
1792DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1793{
1794 uint8_t const offOpcode = pIemCpu->offOpcode;
1795 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1796 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1797
1798 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1799 pIemCpu->abOpcode[offOpcode + 1],
1800 pIemCpu->abOpcode[offOpcode + 2],
1801 pIemCpu->abOpcode[offOpcode + 3]);
1802 *pu64 = i32;
1803 pIemCpu->offOpcode = offOpcode + 4;
1804 return VINF_SUCCESS;
1805}
1806
1807
1808/**
1809 * Fetches the next opcode double word and sign extends it to a quad word,
1810 * returns automatically on failure.
1811 *
1812 * @param a_pu64 Where to return the opcode quad word.
1813 * @remark Implicitly references pIemCpu.
1814 */
1815#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1816 do \
1817 { \
1818 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1819 if (rcStrict2 != VINF_SUCCESS) \
1820 return rcStrict2; \
1821 } while (0)
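
/*
 * Worked example (illustrative): an opcode dword of 0xfffffffe (-2) fetched via
 * IEM_OPCODE_GET_NEXT_S32_SX_U64 yields UINT64_C(0xfffffffffffffffe), whereas the
 * zero-extending IEM_OPCODE_GET_NEXT_U32_ZX_U64 above would yield
 * UINT64_C(0x00000000fffffffe).
 */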
1822
1823
1824/**
1825 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1826 *
1827 * @returns Strict VBox status code.
1828 * @param pIemCpu The IEM state.
1829 * @param pu64 Where to return the opcode qword.
1830 */
1831DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1832{
1833 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1834 if (rcStrict == VINF_SUCCESS)
1835 {
1836 uint8_t offOpcode = pIemCpu->offOpcode;
1837 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1838 pIemCpu->abOpcode[offOpcode + 1],
1839 pIemCpu->abOpcode[offOpcode + 2],
1840 pIemCpu->abOpcode[offOpcode + 3],
1841 pIemCpu->abOpcode[offOpcode + 4],
1842 pIemCpu->abOpcode[offOpcode + 5],
1843 pIemCpu->abOpcode[offOpcode + 6],
1844 pIemCpu->abOpcode[offOpcode + 7]);
1845 pIemCpu->offOpcode = offOpcode + 8;
1846 }
1847 else
1848 *pu64 = 0;
1849 return rcStrict;
1850}
1851
1852
1853/**
1854 * Fetches the next opcode qword.
1855 *
1856 * @returns Strict VBox status code.
1857 * @param pIemCpu The IEM state.
1858 * @param pu64 Where to return the opcode qword.
1859 */
1860DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1861{
1862 uint8_t const offOpcode = pIemCpu->offOpcode;
1863 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1864 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1865
1866 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1867 pIemCpu->abOpcode[offOpcode + 1],
1868 pIemCpu->abOpcode[offOpcode + 2],
1869 pIemCpu->abOpcode[offOpcode + 3],
1870 pIemCpu->abOpcode[offOpcode + 4],
1871 pIemCpu->abOpcode[offOpcode + 5],
1872 pIemCpu->abOpcode[offOpcode + 6],
1873 pIemCpu->abOpcode[offOpcode + 7]);
1874 pIemCpu->offOpcode = offOpcode + 8;
1875 return VINF_SUCCESS;
1876}
1877
1878
1879/**
1880 * Fetches the next opcode quad word, returns automatically on failure.
1881 *
1882 * @param a_pu64 Where to return the opcode quad word.
1883 * @remark Implicitly references pIemCpu.
1884 */
1885#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1886 do \
1887 { \
1888 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1889 if (rcStrict2 != VINF_SUCCESS) \
1890 return rcStrict2; \
1891 } while (0)
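
/*
 * Usage sketch for the IEM_OPCODE_GET_NEXT_* macros above (illustrative only; the
 * decoder function and immediate below are hypothetical): the macros return from
 * the calling function on fetch failure, so they may only be used in functions
 * returning VBOXSTRICTRC.
 *
 *     static VBOXSTRICTRC iemOp_ExampleWithImm32(PIEMCPU pIemCpu)
 *     {
 *         uint32_t u32Imm;
 *         IEM_OPCODE_GET_NEXT_U32(&u32Imm);   // returns rcStrict2 to our caller on failure
 *         // ... decode the rest and dispatch ...
 *         return VINF_SUCCESS;
 *     }
 */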
1892
1893
1894/** @name Misc Worker Functions.
1895 * @{
1896 */
1897
1898
1899/**
1900 * Validates a new SS segment.
1901 *
1902 * @returns VBox strict status code.
1903 * @param pIemCpu The IEM per CPU instance data.
1904 * @param pCtx The CPU context.
1905 * @param NewSS The new SS selector.
1906 * @param uCpl The CPL to load the stack for.
1907 * @param pDesc Where to return the descriptor.
1908 */
1909static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1910{
1911 NOREF(pCtx);
1912
1913 /* Null selectors are not allowed (we're not called for dispatching
1914 interrupts with SS=0 in long mode). */
1915 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1916 {
1917 Log(("iemMiscValidateNewSSandRsp: #x - null selector -> #TS(0)\n", NewSS));
1918 return iemRaiseTaskSwitchFault0(pIemCpu);
1919 }
1920
1921 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1922 if ((NewSS & X86_SEL_RPL) != uCpl)
1923 {
1924 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1925 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1926 }
1927
1928 /*
1929 * Read the descriptor.
1930 */
1931 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1932 if (rcStrict != VINF_SUCCESS)
1933 return rcStrict;
1934
1935 /*
1936 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1937 */
1938 if (!pDesc->Legacy.Gen.u1DescType)
1939 {
1940 Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1941 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1942 }
1943
1944 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1945 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1946 {
1947 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1948 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1949 }
1950 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1951 {
1952 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1953 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1954 }
1955
1956 /* Is it there? */
1957 /** @todo testcase: Is this checked before the canonical / limit check below? */
1958 if (!pDesc->Legacy.Gen.u1Present)
1959 {
1960 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1961 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1962 }
1963
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/**
1969 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1970 * not.
1971 *
1972 * @param a_pIemCpu The IEM per CPU data.
1973 * @param a_pCtx The CPU context.
1974 */
1975#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1976# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1977 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1978 ? (a_pCtx)->eflags.u \
1979 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1980#else
1981# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1982 ( (a_pCtx)->eflags.u )
1983#endif
1984
1985/**
1986 * Updates the EFLAGS in the correct manner wrt. PATM.
1987 *
1988 * @param a_pIemCpu The IEM per CPU data.
1989 * @param a_pCtx The CPU context.
1990 */
1991#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1992# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1993 do { \
1994 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1995 (a_pCtx)->eflags.u = (a_fEfl); \
1996 else \
1997 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1998 } while (0)
1999#else
2000# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2001 do { \
2002 (a_pCtx)->eflags.u = (a_fEfl); \
2003 } while (0)
2004#endif
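
/*
 * Minimal usage sketch (assumption, not lifted from a specific caller): EFLAGS is
 * always accessed through these wrappers so raw-mode/PATM guests keep a consistent
 * view, e.g. a read-modify-write clearing IF:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */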
2005
2006
2007/** @} */
2008
2009/** @name Raising Exceptions.
2010 *
2011 * @{
2012 */
2013
2014/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2015 * @{ */
2016/** CPU exception. */
2017#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2018/** External interrupt (from PIC, APIC, whatever). */
2019#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2020/** Software interrupt (int or into, not bound).
2021 * Returns to the following instruction. */
2022#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2023/** Takes an error code. */
2024#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2025/** Takes a CR2. */
2026#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2027/** Generated by the breakpoint instruction. */
2028#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2029/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2030#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2031/** @} */
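
/*
 * Typical combinations passed in the fFlags argument of the raise helpers
 * (illustrative, not exhaustive):
 *     - fault without error code (e.g. #UD):  IEM_XCPT_FLAGS_T_CPU_XCPT
 *     - fault with error code (e.g. #GP/#TS): IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR
 *     - page fault (#PF):                     IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 *     - INT imm8:                             IEM_XCPT_FLAGS_T_SOFT_INT
 */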
2032
2033
2034/**
2035 * Loads the specified stack far pointer from the TSS.
2036 *
2037 * @returns VBox strict status code.
2038 * @param pIemCpu The IEM per CPU instance data.
2039 * @param pCtx The CPU context.
2040 * @param uCpl The CPL to load the stack for.
2041 * @param pSelSS Where to return the new stack segment.
2042 * @param puEsp Where to return the new stack pointer.
2043 */
2044static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2045 PRTSEL pSelSS, uint32_t *puEsp)
2046{
2047 VBOXSTRICTRC rcStrict;
2048 Assert(uCpl < 4);
2049 *puEsp = 0; /* make gcc happy */
2050 *pSelSS = 0; /* make gcc happy */
2051
2052 switch (pCtx->tr.Attr.n.u4Type)
2053 {
2054 /*
2055 * 16-bit TSS (X86TSS16).
2056 */
2057 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2058 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2059 {
2060 uint32_t off = uCpl * 4 + 2;
2061 if (off + 4 > pCtx->tr.u32Limit)
2062 {
2063 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2064 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2065 }
2066
2067 uint32_t u32Tmp = 0; /* gcc maybe... */
2068 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2069 if (rcStrict == VINF_SUCCESS)
2070 {
2071 *puEsp = RT_LOWORD(u32Tmp);
2072 *pSelSS = RT_HIWORD(u32Tmp);
2073 return VINF_SUCCESS;
2074 }
2075 break;
2076 }
2077
2078 /*
2079 * 32-bit TSS (X86TSS32).
2080 */
2081 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2082 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2083 {
2084 uint32_t off = uCpl * 8 + 4;
2085 if (off + 7 > pCtx->tr.u32Limit)
2086 {
2087 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2088 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2089 }
2090
2091 uint64_t u64Tmp;
2092 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2093 if (rcStrict == VINF_SUCCESS)
2094 {
2095 *puEsp = u64Tmp & UINT32_MAX;
2096 *pSelSS = (RTSEL)(u64Tmp >> 32);
2097 return VINF_SUCCESS;
2098 }
2099 break;
2100 }
2101
2102 default:
2103 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2104 }
2105 return rcStrict;
2106}
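
/*
 * Worked example for the offsets above (illustrative): with uCpl=1 a 16-bit TSS is
 * read at offset 1*4+2 = 6, i.e. the sp1:ss1 pair, while a 32-bit TSS is read at
 * offset 1*8+4 = 12, i.e. the esp1:ss1 pair (see X86TSS16 / X86TSS32).
 */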
2107
2108
2109/**
2110 * Loads the specified stack pointer from the 64-bit TSS.
2111 *
2112 * @returns VBox strict status code.
2113 * @param pIemCpu The IEM per CPU instance data.
2114 * @param pCtx The CPU context.
2115 * @param uCpl The CPL to load the stack for.
2116 * @param uIst The interrupt stack table index; 0 means use uCpl.
2117 * @param puRsp Where to return the new stack pointer.
2118 */
2119static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
2120 uint64_t *puRsp)
2121{
2122 Assert(uCpl < 4);
2123 Assert(uIst < 8);
2124 *puRsp = 0; /* make gcc happy */
2125
2126 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2127
2128 uint32_t off;
2129 if (uIst)
2130 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2131 else
2132 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2133 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2134 {
2135 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2136 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2137 }
2138
2139 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2140}
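
/*
 * Worked example (illustrative): uIst=2 makes the fetch above target
 * RT_OFFSETOF(X86TSS64, ist1) + 8, i.e. IST2, whereas uIst=0 with uCpl=1 targets
 * RT_OFFSETOF(X86TSS64, rsp0) + 8, i.e. RSP1.
 */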
2141
2142
2143/**
2144 * Adjusts the CPU state according to the exception being raised.
2145 *
2146 * @param pCtx The CPU context.
2147 * @param u8Vector The exception that has been raised.
2148 */
2149DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2150{
2151 switch (u8Vector)
2152 {
2153 case X86_XCPT_DB:
2154 pCtx->dr[7] &= ~X86_DR7_GD;
2155 break;
2156 /** @todo Read the AMD and Intel exception reference... */
2157 }
2158}
2159
2160
2161/**
2162 * Implements exceptions and interrupts for real mode.
2163 *
2164 * @returns VBox strict status code.
2165 * @param pIemCpu The IEM per CPU instance data.
2166 * @param pCtx The CPU context.
2167 * @param cbInstr The number of bytes to offset rIP by in the return
2168 * address.
2169 * @param u8Vector The interrupt / exception vector number.
2170 * @param fFlags The flags.
2171 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2172 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2173 */
2174static VBOXSTRICTRC
2175iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2176 PCPUMCTX pCtx,
2177 uint8_t cbInstr,
2178 uint8_t u8Vector,
2179 uint32_t fFlags,
2180 uint16_t uErr,
2181 uint64_t uCr2)
2182{
2183 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2184 NOREF(uErr); NOREF(uCr2);
2185
2186 /*
2187 * Read the IDT entry.
2188 */
2189 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2190 {
2191 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2192 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2193 }
2194 RTFAR16 Idte;
2195 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2196 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2197 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2198 return rcStrict;
2199
2200 /*
2201 * Push the stack frame.
2202 */
2203 uint16_t *pu16Frame;
2204 uint64_t uNewRsp;
2205 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2206 if (rcStrict != VINF_SUCCESS)
2207 return rcStrict;
2208
2209 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2210 pu16Frame[2] = (uint16_t)fEfl;
2211 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2212 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2213 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2214 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2215 return rcStrict;
2216
2217 /*
2218 * Load the vector address into cs:ip and make exception specific state
2219 * adjustments.
2220 */
2221 pCtx->cs.Sel = Idte.sel;
2222 pCtx->cs.ValidSel = Idte.sel;
2223 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2224 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2225 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2226 pCtx->rip = Idte.off;
2227 fEfl &= ~X86_EFL_IF;
2228 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2229
2230 /** @todo do we actually do this in real mode? */
2231 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2232 iemRaiseXcptAdjustState(pCtx, u8Vector);
2233
2234 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2235}
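
/*
 * Worked example (illustrative): for u8Vector=0x08 the IVT entry is fetched from
 * idtr.pIdt + 0x20 (4 bytes per real-mode vector), the 6-byte frame pushed above
 * holds FLAGS, CS and IP (IP biased by cbInstr for software interrupts), and
 * execution resumes at Idte.sel:Idte.off with IF cleared.
 */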
2236
2237
2238/**
2239 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2240 *
2241 * @param pIemCpu The IEM per CPU instance data.
2242 * @param pSReg Pointer to the segment register.
2243 */
2244static void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2245{
2246 pSReg->Sel = 0;
2247 pSReg->ValidSel = 0;
2248 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2249 {
2250 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2251 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2252 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2253 }
2254 else
2255 {
2256 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2257 /** @todo check this on AMD-V */
2258 pSReg->u64Base = 0;
2259 pSReg->u32Limit = 0;
2260 }
2261}
2262
2263
2264/**
2265 * Loads a segment selector during a task switch in V8086 mode.
2266 *
2267 * @param pIemCpu The IEM per CPU instance data.
2268 * @param pSReg Pointer to the segment register.
2269 * @param uSel The selector value to load.
2270 */
2271static void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2272{
2273 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2274 pSReg->Sel = uSel;
2275 pSReg->ValidSel = uSel;
2276 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2277 pSReg->u64Base = uSel << 4;
2278 pSReg->u32Limit = 0xffff;
2279 pSReg->Attr.u = 0xf3;
2280}
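
/*
 * Worked example (illustrative): uSel=0x1234 yields base 0x12340, limit 0xffff and
 * attributes 0xf3 (present, DPL=3, accessed read/write data), which is what the
 * VT-x guest-state checks referenced above expect for V8086 segments.
 */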
2281
2282
2283/**
2284 * Loads a NULL data selector into a selector register, both the hidden and
2285 * visible parts, in protected mode.
2286 *
2287 * @param pIemCpu The IEM state of the calling EMT.
2288 * @param pSReg Pointer to the segment register.
2289 * @param uRpl The RPL.
2290 */
2291static void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2292{
2293 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2294 * data selector in protected mode. */
2295 pSReg->Sel = uRpl;
2296 pSReg->ValidSel = uRpl;
2297 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2298 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2299 {
2300 /* VT-x (Intel 3960x) observed doing something like this. */
2301 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2302 pSReg->u32Limit = UINT32_MAX;
2303 pSReg->u64Base = 0;
2304 }
2305 else
2306 {
2307 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2308 pSReg->u32Limit = 0;
2309 pSReg->u64Base = 0;
2310 }
2311}
2312
2313
2314/**
2315 * Loads a segment selector during a task switch in protected mode. In this task
2316 * switch scenario, we would throw #TS exceptions rather than #GPs.
2317 *
2318 * @returns VBox strict status code.
2319 * @param pIemCpu The IEM per CPU instance data.
2320 * @param pSReg Pointer to the segment register.
2321 * @param uSel The new selector value.
2322 *
2323 * @remarks This does -NOT- handle CS or SS.
2324 * @remarks This expects pIemCpu->uCpl to be up to date.
2325 */
2326static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2327{
2328 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2329
2330 /* Null data selector. */
2331 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2332 {
2333 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2335 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2336 return VINF_SUCCESS;
2337 }
2338
2339 /* Fetch the descriptor. */
2340 IEMSELDESC Desc;
2341 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2342 if (rcStrict != VINF_SUCCESS)
2343 {
2344 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2345 VBOXSTRICTRC_VAL(rcStrict)));
2346 return rcStrict;
2347 }
2348
2349 /* Must be a data segment or readable code segment. */
2350 if ( !Desc.Legacy.Gen.u1DescType
2351 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2352 {
2353 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2354 Desc.Legacy.Gen.u4Type));
2355 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2356 }
2357
2358 /* Check privileges for data segments and non-conforming code segments. */
2359 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2360 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2361 {
2362 /* The RPL and the new CPL must be less than or equal to the DPL. */
2363 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2364 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2365 {
2366 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2367 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2368 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2369 }
2370 }
2371
2372 /* Is it there? */
2373 if (!Desc.Legacy.Gen.u1Present)
2374 {
2375 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2376 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2377 }
2378
2379 /* The base and limit. */
2380 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2381 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2382
2383 /*
2384 * Ok, everything checked out fine. Now set the accessed bit before
2385 * committing the result into the registers.
2386 */
2387 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2388 {
2389 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2390 if (rcStrict != VINF_SUCCESS)
2391 return rcStrict;
2392 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2393 }
2394
2395 /* Commit */
2396 pSReg->Sel = uSel;
2397 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2398 pSReg->u32Limit = cbLimit;
2399 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2400 pSReg->ValidSel = uSel;
2401 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2402 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2403 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2404
2405 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2406 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2407 return VINF_SUCCESS;
2408}
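
/*
 * Illustrative check (sketch): loading a selector with RPL=3 into DS while CPL=3
 * requires DPL=3 for a data segment; a DPL=0 descriptor makes the code above raise
 * #TS with uSel & X86_SEL_MASK_OFF_RPL as the error code, rather than the #GP a
 * normal MOV DS would produce.
 */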
2409
2410
2411/**
2412 * Performs a task switch.
2413 *
2414 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2415 * caller is responsible for performing the necessary checks (like DPL, TSS
2416 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2417 * reference for JMP, CALL, IRET.
2418 *
2419 * If the task switch is due to a software interrupt or hardware exception,
2420 * the caller is responsible for validating the TSS selector and descriptor. See
2421 * Intel Instruction reference for INT n.
2422 *
2423 * @returns VBox strict status code.
2424 * @param pIemCpu The IEM per CPU instance data.
2425 * @param pCtx The CPU context.
2426 * @param enmTaskSwitch What caused this task switch.
2427 * @param uNextEip The EIP effective after the task switch.
2428 * @param fFlags The flags.
2429 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2430 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2431 * @param SelTSS The TSS selector of the new task.
2432 * @param pNewDescTSS Pointer to the new TSS descriptor.
2433 */
2434static VBOXSTRICTRC iemTaskSwitch(PIEMCPU pIemCpu,
2435 PCPUMCTX pCtx,
2436 IEMTASKSWITCH enmTaskSwitch,
2437 uint32_t uNextEip,
2438 uint32_t fFlags,
2439 uint16_t uErr,
2440 uint64_t uCr2,
2441 RTSEL SelTSS,
2442 PIEMSELDESC pNewDescTSS)
2443{
2444 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2445 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2446
2447 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2448 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2449 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2450 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2451 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2452
2453 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2454 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2455
2456 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2457 fIsNewTSS386, pCtx->eip, uNextEip));
2458
2459 /* Update CR2 in case it's a page-fault. */
2460 /** @todo This should probably be done much earlier in IEM/PGM. See
2461 * @bugref{5653} comment #49. */
2462 if (fFlags & IEM_XCPT_FLAGS_CR2)
2463 pCtx->cr2 = uCr2;
2464
2465 /*
2466 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2467 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2468 */
2469 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2470 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2471 if (uNewTSSLimit < uNewTSSLimitMin)
2472 {
2473 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2474 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2475 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2476 }
2477
2478 /*
2479 * Check the current TSS limit. The last written byte to the current TSS during the
2480 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2481 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2482 *
2483 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2484 * end up with smaller than "legal" TSS limits.
2485 */
2486 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2487 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2488 if (uCurTSSLimit < uCurTSSLimitMin)
2489 {
2490 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2491 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2492 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2493 }
2494
2495 /*
2496 * Verify that the new TSS can be accessed and map it. Map only the required contents
2497 * and not the entire TSS.
2498 */
2499 void *pvNewTSS;
2500 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2501 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2502 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2503 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2504 * not perform correct translation if this happens. See Intel spec. 7.2.1
2505 * "Task-State Segment" */
2506 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2507 if (rcStrict != VINF_SUCCESS)
2508 {
2509 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2510 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2511 return rcStrict;
2512 }
2513
2514 /*
2515 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2516 */
2517 uint32_t u32EFlags = pCtx->eflags.u32;
2518 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2519 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2520 {
2521 PX86DESC pDescCurTSS;
2522 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2523 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2524 if (rcStrict != VINF_SUCCESS)
2525 {
2526 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2527 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2528 return rcStrict;
2529 }
2530
2531 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2532 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2533 if (rcStrict != VINF_SUCCESS)
2534 {
2535 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2536 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2537 return rcStrict;
2538 }
2539
2540 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2541 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2542 {
2543 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2544 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2545 u32EFlags &= ~X86_EFL_NT;
2546 }
2547 }
2548
2549 /*
2550 * Save the CPU state into the current TSS.
2551 */
2552 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2553 if (GCPtrNewTSS == GCPtrCurTSS)
2554 {
2555 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2556 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2557 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2558 }
2559 if (fIsNewTSS386)
2560 {
2561 /*
2562 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2563 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2564 */
2565 void *pvCurTSS32;
2566 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2567 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2568 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2569 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2570 if (rcStrict != VINF_SUCCESS)
2571 {
2572 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2573 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2574 return rcStrict;
2575 }
2576
2577 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2578 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2579 pCurTSS32->eip = uNextEip;
2580 pCurTSS32->eflags = u32EFlags;
2581 pCurTSS32->eax = pCtx->eax;
2582 pCurTSS32->ecx = pCtx->ecx;
2583 pCurTSS32->edx = pCtx->edx;
2584 pCurTSS32->ebx = pCtx->ebx;
2585 pCurTSS32->esp = pCtx->esp;
2586 pCurTSS32->ebp = pCtx->ebp;
2587 pCurTSS32->esi = pCtx->esi;
2588 pCurTSS32->edi = pCtx->edi;
2589 pCurTSS32->es = pCtx->es.Sel;
2590 pCurTSS32->cs = pCtx->cs.Sel;
2591 pCurTSS32->ss = pCtx->ss.Sel;
2592 pCurTSS32->ds = pCtx->ds.Sel;
2593 pCurTSS32->fs = pCtx->fs.Sel;
2594 pCurTSS32->gs = pCtx->gs.Sel;
2595
2596 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2597 if (rcStrict != VINF_SUCCESS)
2598 {
2599 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2600 VBOXSTRICTRC_VAL(rcStrict)));
2601 return rcStrict;
2602 }
2603 }
2604 else
2605 {
2606 /*
2607 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2608 */
2609 void *pvCurTSS16;
2610 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2611 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2612 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2613 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2614 if (rcStrict != VINF_SUCCESS)
2615 {
2616 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2617 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2618 return rcStrict;
2619 }
2620
2621 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2622 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2623 pCurTSS16->ip = uNextEip;
2624 pCurTSS16->flags = u32EFlags;
2625 pCurTSS16->ax = pCtx->ax;
2626 pCurTSS16->cx = pCtx->cx;
2627 pCurTSS16->dx = pCtx->dx;
2628 pCurTSS16->bx = pCtx->bx;
2629 pCurTSS16->sp = pCtx->sp;
2630 pCurTSS16->bp = pCtx->bp;
2631 pCurTSS16->si = pCtx->si;
2632 pCurTSS16->di = pCtx->di;
2633 pCurTSS16->es = pCtx->es.Sel;
2634 pCurTSS16->cs = pCtx->cs.Sel;
2635 pCurTSS16->ss = pCtx->ss.Sel;
2636 pCurTSS16->ds = pCtx->ds.Sel;
2637
2638 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2639 if (rcStrict != VINF_SUCCESS)
2640 {
2641 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2642 VBOXSTRICTRC_VAL(rcStrict)));
2643 return rcStrict;
2644 }
2645 }
2646
2647 /*
2648 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2649 */
2650 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2651 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2652 {
2653 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2654 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2655 pNewTSS->selPrev = pCtx->tr.Sel;
2656 }
2657
2658 /*
2659 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2660 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2661 */
2662 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2663 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2664 bool fNewDebugTrap;
2665 if (fIsNewTSS386)
2666 {
2667 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2668 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2669 uNewEip = pNewTSS32->eip;
2670 uNewEflags = pNewTSS32->eflags;
2671 uNewEax = pNewTSS32->eax;
2672 uNewEcx = pNewTSS32->ecx;
2673 uNewEdx = pNewTSS32->edx;
2674 uNewEbx = pNewTSS32->ebx;
2675 uNewEsp = pNewTSS32->esp;
2676 uNewEbp = pNewTSS32->ebp;
2677 uNewEsi = pNewTSS32->esi;
2678 uNewEdi = pNewTSS32->edi;
2679 uNewES = pNewTSS32->es;
2680 uNewCS = pNewTSS32->cs;
2681 uNewSS = pNewTSS32->ss;
2682 uNewDS = pNewTSS32->ds;
2683 uNewFS = pNewTSS32->fs;
2684 uNewGS = pNewTSS32->gs;
2685 uNewLdt = pNewTSS32->selLdt;
2686 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2687 }
2688 else
2689 {
2690 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2691 uNewCr3 = 0;
2692 uNewEip = pNewTSS16->ip;
2693 uNewEflags = pNewTSS16->flags;
2694 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2695 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2696 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2697 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2698 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2699 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2700 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2701 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2702 uNewES = pNewTSS16->es;
2703 uNewCS = pNewTSS16->cs;
2704 uNewSS = pNewTSS16->ss;
2705 uNewDS = pNewTSS16->ds;
2706 uNewFS = 0;
2707 uNewGS = 0;
2708 uNewLdt = pNewTSS16->selLdt;
2709 fNewDebugTrap = false;
2710 }
2711
2712 if (GCPtrNewTSS == GCPtrCurTSS)
2713 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2714 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2715
2716 /*
2717 * We're done accessing the new TSS.
2718 */
2719 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2720 if (rcStrict != VINF_SUCCESS)
2721 {
2722 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2723 return rcStrict;
2724 }
2725
2726 /*
2727 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2728 */
2729 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2730 {
2731 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2732 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2733 if (rcStrict != VINF_SUCCESS)
2734 {
2735 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2736 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2737 return rcStrict;
2738 }
2739
2740 /* Check that the descriptor indicates the new TSS is available (not busy). */
2741 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2742 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2743 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2744
2745 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2746 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2747 if (rcStrict != VINF_SUCCESS)
2748 {
2749 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2750 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2751 return rcStrict;
2752 }
2753 }
2754
2755 /*
2756 * From this point on, we're technically in the new task. We will defer exceptions
2757 * until the completion of the task switch but before executing any instructions in the new task.
2758 */
2759 pCtx->tr.Sel = SelTSS;
2760 pCtx->tr.ValidSel = SelTSS;
2761 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2762 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2763 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2764 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2765 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2766
2767 /* Set the busy bit in TR. */
2768 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2769 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2770 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2771 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2772 {
2773 uNewEflags |= X86_EFL_NT;
2774 }
2775
2776 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2777 pCtx->cr0 |= X86_CR0_TS;
2778 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2779
2780 pCtx->eip = uNewEip;
2781 pCtx->eax = uNewEax;
2782 pCtx->ecx = uNewEcx;
2783 pCtx->edx = uNewEdx;
2784 pCtx->ebx = uNewEbx;
2785 pCtx->esp = uNewEsp;
2786 pCtx->ebp = uNewEbp;
2787 pCtx->esi = uNewEsi;
2788 pCtx->edi = uNewEdi;
2789
2790 uNewEflags &= X86_EFL_LIVE_MASK;
2791 uNewEflags |= X86_EFL_RA1_MASK;
2792 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2793
2794 /*
2795 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2796 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2797 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2798 */
2799 pCtx->es.Sel = uNewES;
2800 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2801 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2802
2803 pCtx->cs.Sel = uNewCS;
2804 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2805 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2806
2807 pCtx->ss.Sel = uNewSS;
2808 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2809 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2810
2811 pCtx->ds.Sel = uNewDS;
2812 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2813 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2814
2815 pCtx->fs.Sel = uNewFS;
2816 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2817 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2818
2819 pCtx->gs.Sel = uNewGS;
2820 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2821 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2822 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2823
2824 pCtx->ldtr.Sel = uNewLdt;
2825 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2826 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2827 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2828
2829 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2830 {
2831 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2832 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2833 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2834 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2835 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2836 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2837 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2838 }
2839
2840 /*
2841 * Switch CR3 for the new task.
2842 */
2843 if ( fIsNewTSS386
2844 && (pCtx->cr0 & X86_CR0_PG))
2845 {
2846 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2847 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2848 {
2849 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2850 AssertRCSuccessReturn(rc, rc);
2851 }
2852 else
2853 pCtx->cr3 = uNewCr3;
2854
2855 /* Inform PGM. */
2856 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2857 {
2858 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2859 AssertRCReturn(rc, rc);
2860 /* ignore informational status codes */
2861 }
2862 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2863 }
2864
2865 /*
2866 * Switch LDTR for the new task.
2867 */
2868 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2869 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2870 else
2871 {
2872 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2873
2874 IEMSELDESC DescNewLdt;
2875 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2876 if (rcStrict != VINF_SUCCESS)
2877 {
2878 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2879 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2880 return rcStrict;
2881 }
2882 if ( !DescNewLdt.Legacy.Gen.u1Present
2883 || DescNewLdt.Legacy.Gen.u1DescType
2884 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2885 {
2886 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2887 uNewLdt, DescNewLdt.Legacy.u));
2888 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2889 }
2890
2891 pCtx->ldtr.ValidSel = uNewLdt;
2892 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2893 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2894 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2895 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2896 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2897 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2898 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2899 }
2900
2901 IEMSELDESC DescSS;
2902 if (IEM_IS_V86_MODE(pIemCpu))
2903 {
2904 pIemCpu->uCpl = 3;
2905 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2906 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2907 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2908 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2909 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2910 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2911 }
2912 else
2913 {
2914 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2915
2916 /*
2917 * Load the stack segment for the new task.
2918 */
2919 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2920 {
2921 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2922 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2923 }
2924
2925 /* Fetch the descriptor. */
2926 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2927 if (rcStrict != VINF_SUCCESS)
2928 {
2929 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2930 VBOXSTRICTRC_VAL(rcStrict)));
2931 return rcStrict;
2932 }
2933
2934 /* SS must be a data segment and writable. */
2935 if ( !DescSS.Legacy.Gen.u1DescType
2936 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2937 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2938 {
2939 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2940 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2941 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2942 }
2943
2944 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2945 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2946 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2947 {
2948 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2949 uNewCpl));
2950 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2951 }
2952
2953 /* Is it there? */
2954 if (!DescSS.Legacy.Gen.u1Present)
2955 {
2956 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2957 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2958 }
2959
2960 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2961 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2962
2963 /* Set the accessed bit before committing the result into SS. */
2964 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2965 {
2966 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2967 if (rcStrict != VINF_SUCCESS)
2968 return rcStrict;
2969 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2970 }
2971
2972 /* Commit SS. */
2973 pCtx->ss.Sel = uNewSS;
2974 pCtx->ss.ValidSel = uNewSS;
2975 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2976 pCtx->ss.u32Limit = cbLimit;
2977 pCtx->ss.u64Base = u64Base;
2978 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
2980
2981 /* CPL has changed, update IEM before loading rest of segments. */
2982 pIemCpu->uCpl = uNewCpl;
2983
2984 /*
2985 * Load the data segments for the new task.
2986 */
2987 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
2988 if (rcStrict != VINF_SUCCESS)
2989 return rcStrict;
2990 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
2991 if (rcStrict != VINF_SUCCESS)
2992 return rcStrict;
2993 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
2994 if (rcStrict != VINF_SUCCESS)
2995 return rcStrict;
2996 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
2997 if (rcStrict != VINF_SUCCESS)
2998 return rcStrict;
2999
3000 /*
3001 * Load the code segment for the new task.
3002 */
3003 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3004 {
3005 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3006 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3007 }
3008
3009 /* Fetch the descriptor. */
3010 IEMSELDESC DescCS;
3011 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3012 if (rcStrict != VINF_SUCCESS)
3013 {
3014 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3015 return rcStrict;
3016 }
3017
3018 /* CS must be a code segment. */
3019 if ( !DescCS.Legacy.Gen.u1DescType
3020 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3021 {
3022 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3023 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3024 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3025 }
3026
3027 /* For conforming CS, DPL must be less than or equal to the RPL. */
3028 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3029 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3030 {
3031 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3032 DescCS.Legacy.Gen.u2Dpl));
3033 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3034 }
3035
3036 /* For non-conforming CS, DPL must match RPL. */
3037 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3038 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3039 {
3040 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3041 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3042 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3043 }
3044
3045 /* Is it there? */
3046 if (!DescCS.Legacy.Gen.u1Present)
3047 {
3048 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3049 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3050 }
3051
3052 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3053 u64Base = X86DESC_BASE(&DescCS.Legacy);
3054
3055 /* Set the accessed bit before committing the result into CS. */
3056 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3057 {
3058 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3059 if (rcStrict != VINF_SUCCESS)
3060 return rcStrict;
3061 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3062 }
3063
3064 /* Commit CS. */
3065 pCtx->cs.Sel = uNewCS;
3066 pCtx->cs.ValidSel = uNewCS;
3067 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3068 pCtx->cs.u32Limit = cbLimit;
3069 pCtx->cs.u64Base = u64Base;
3070 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3072 }
3073
3074 /** @todo Debug trap. */
3075 if (fIsNewTSS386 && fNewDebugTrap)
3076 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3077
3078 /*
3079 * Construct the error code masks based on what caused this task switch.
3080 * See Intel Instruction reference for INT.
3081 */
3082 uint16_t uExt;
3083 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3084 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3085 {
3086 uExt = 1;
3087 }
3088 else
3089 uExt = 0;
3090
3091 /*
3092 * Push any error code on to the new stack.
3093 */
3094 if (fFlags & IEM_XCPT_FLAGS_ERR)
3095 {
3096 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3097 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3098 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3099
3100 /* Check that there is sufficient space on the stack. */
3101 /** @todo Factor out segment limit checking for normal/expand down segments
3102 * into a separate function. */
3103 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3104 {
3105 if ( pCtx->esp - 1 > cbLimitSS
3106 || pCtx->esp < cbStackFrame)
3107 {
3108 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3109 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3110 cbStackFrame));
3111 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3112 }
3113 }
3114 else
3115 {
3116 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3117 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3118 {
3119 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3120 cbStackFrame));
3121 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3122 }
3123 }
3124
3125
3126 if (fIsNewTSS386)
3127 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3128 else
3129 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3130 if (rcStrict != VINF_SUCCESS)
3131 {
3132 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3133 VBOXSTRICTRC_VAL(rcStrict)));
3134 return rcStrict;
3135 }
3136 }
3137
3138 /* Check the new EIP against the new CS limit. */
3139 if (pCtx->eip > pCtx->cs.u32Limit)
3140 {
3141 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3142 pCtx->eip, pCtx->cs.u32Limit));
3143 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3144 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3145 }
3146
3147 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3148 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3149}
3150
3151
3152/**
3153 * Implements exceptions and interrupts for protected mode.
3154 *
3155 * @returns VBox strict status code.
3156 * @param pIemCpu The IEM per CPU instance data.
3157 * @param pCtx The CPU context.
3158 * @param cbInstr The number of bytes to offset rIP by in the return
3159 * address.
3160 * @param u8Vector The interrupt / exception vector number.
3161 * @param fFlags The flags.
3162 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3163 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3164 */
3165static VBOXSTRICTRC
3166iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3167 PCPUMCTX pCtx,
3168 uint8_t cbInstr,
3169 uint8_t u8Vector,
3170 uint32_t fFlags,
3171 uint16_t uErr,
3172 uint64_t uCr2)
3173{
3174 /*
3175 * Read the IDT entry.
3176 */
3177 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3178 {
3179 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3180 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3181 }
3182 X86DESC Idte;
3183 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3184 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3185 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3186 return rcStrict;
3187 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3188 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3189 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3190
3191 /*
3192 * Check the descriptor type, DPL and such.
3193 * ASSUMES this is done in the same order as described for call-gate calls.
3194 */
3195 if (Idte.Gate.u1DescType)
3196 {
3197 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3198 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3199 }
3200 bool fTaskGate = false;
3201 uint8_t f32BitGate = true;
3202 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3203 switch (Idte.Gate.u4Type)
3204 {
3205 case X86_SEL_TYPE_SYS_UNDEFINED:
3206 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3207 case X86_SEL_TYPE_SYS_LDT:
3208 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3209 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3210 case X86_SEL_TYPE_SYS_UNDEFINED2:
3211 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3212 case X86_SEL_TYPE_SYS_UNDEFINED3:
3213 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3214 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3215 case X86_SEL_TYPE_SYS_UNDEFINED4:
3216 {
3217 /** @todo check what actually happens when the type is wrong...
3218 * esp. call gates. */
3219 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3220 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3221 }
3222
3223 case X86_SEL_TYPE_SYS_286_INT_GATE:
3224 f32BitGate = false;
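 /* fall thru */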
3225 case X86_SEL_TYPE_SYS_386_INT_GATE:
3226 fEflToClear |= X86_EFL_IF;
3227 break;
3228
3229 case X86_SEL_TYPE_SYS_TASK_GATE:
3230 fTaskGate = true;
3231#ifndef IEM_IMPLEMENTS_TASKSWITCH
3232 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3233#endif
3234 break;
3235
3236 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3237 f32BitGate = false;
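 /* fall thru */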
3238 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3239 break;
3240
3241 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3242 }
3243
3244 /* Check DPL against CPL if applicable. */
3245 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3246 {
3247 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3248 {
3249 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3250 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3251 }
3252 }
3253
3254 /* Is it there? */
3255 if (!Idte.Gate.u1Present)
3256 {
3257 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3258 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3259 }
3260
3261 /* Is it a task-gate? */
3262 if (fTaskGate)
3263 {
3264 /*
3265 * Construct the error code masks based on what caused this task switch.
3266 * See Intel Instruction reference for INT.
3267 */
3268 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3269 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3270 RTSEL SelTSS = Idte.Gate.u16Sel;
3271
3272 /*
3273 * Fetch the TSS descriptor in the GDT.
3274 */
3275 IEMSELDESC DescTSS;
3276 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3277 if (rcStrict != VINF_SUCCESS)
3278 {
3279 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3280 VBOXSTRICTRC_VAL(rcStrict)));
3281 return rcStrict;
3282 }
3283
3284 /* The TSS descriptor must be a system segment and be available (not busy). */
3285 if ( DescTSS.Legacy.Gen.u1DescType
3286 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3287 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3288 {
3289 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3290 u8Vector, SelTSS, DescTSS.Legacy.au64));
3291 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3292 }
3293
3294 /* The TSS must be present. */
3295 if (!DescTSS.Legacy.Gen.u1Present)
3296 {
3297 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3298 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3299 }
3300
3301 /* Do the actual task switch. */
3302 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3303 }
3304
3305 /* A null CS is bad. */
3306 RTSEL NewCS = Idte.Gate.u16Sel;
3307 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3308 {
3309 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3310 return iemRaiseGeneralProtectionFault0(pIemCpu);
3311 }
3312
3313 /* Fetch the descriptor for the new CS. */
3314 IEMSELDESC DescCS;
3315 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3316 if (rcStrict != VINF_SUCCESS)
3317 {
3318 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3319 return rcStrict;
3320 }
3321
3322 /* Must be a code segment. */
3323 if (!DescCS.Legacy.Gen.u1DescType)
3324 {
3325 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3326 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3327 }
3328 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3329 {
3330 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3331 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3332 }
3333
3334 /* Don't allow lowering the privilege level. */
3335 /** @todo Does the lowering of privileges apply to software interrupts
3336 * only? This has bearings on the more-privileged or
3337 * same-privilege stack behavior further down. A testcase would
3338 * be nice. */
3339 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3340 {
3341 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3342 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3343 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3344 }
3345
3346 /* Make sure the selector is present. */
3347 if (!DescCS.Legacy.Gen.u1Present)
3348 {
3349 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3350 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3351 }
3352
3353 /* Check the new EIP against the new CS limit. */
3354 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3355 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3356 ? Idte.Gate.u16OffsetLow
3357 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3358 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3359 if (uNewEip > cbLimitCS)
3360 {
3361 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3362 u8Vector, uNewEip, cbLimitCS, NewCS));
3363 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3364 }
3365
3366 /* Calc the flag image to push. */
3367 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3368 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3369 fEfl &= ~X86_EFL_RF;
3370 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3371 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3372
3373 /* From V8086 mode only go to CPL 0. */
3374 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3375 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3376 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3377 {
3378 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3379 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3380 }
3381
3382 /*
3383 * If the privilege level changes, we need to get a new stack from the TSS.
3384 * This in turns means validating the new SS and ESP...
3385 */
3386 if (uNewCpl != pIemCpu->uCpl)
3387 {
3388 RTSEL NewSS;
3389 uint32_t uNewEsp;
3390 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3391 if (rcStrict != VINF_SUCCESS)
3392 return rcStrict;
3393
3394 IEMSELDESC DescSS;
3395 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3396 if (rcStrict != VINF_SUCCESS)
3397 return rcStrict;
3398
3399 /* Check that there is sufficient space for the stack frame. */
3400 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3401 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3402 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3403 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
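 /* Frame size note: without V86 that is 5 (or 6 with an error code) words or dwords -
    IP/EIP, CS, (E)FLAGS, SP/ESP, SS - and when coming from V8086 mode ES, DS, FS and GS
    are pushed as well (4 more). */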
3404
3405 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3406 {
3407 if ( uNewEsp - 1 > cbLimitSS
3408 || uNewEsp < cbStackFrame)
3409 {
3410 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3411 u8Vector, NewSS, uNewEsp, cbStackFrame));
3412 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3413 }
3414 }
3415 else
3416 {
3417 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3418 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3419 {
3420 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3421 u8Vector, NewSS, uNewEsp, cbStackFrame));
3422 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3423 }
3424 }
3425
3426 /*
3427 * Start making changes.
3428 */
3429
3430 /* Create the stack frame. */
3431 RTPTRUNION uStackFrame;
3432 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3433 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3434 if (rcStrict != VINF_SUCCESS)
3435 return rcStrict;
3436 void * const pvStackFrame = uStackFrame.pv;
3437 if (f32BitGate)
3438 {
3439 if (fFlags & IEM_XCPT_FLAGS_ERR)
3440 *uStackFrame.pu32++ = uErr;
3441 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3442 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3443 uStackFrame.pu32[2] = fEfl;
3444 uStackFrame.pu32[3] = pCtx->esp;
3445 uStackFrame.pu32[4] = pCtx->ss.Sel;
3446 if (fEfl & X86_EFL_VM)
3447 {
3448 uStackFrame.pu32[1] = pCtx->cs.Sel;
3449 uStackFrame.pu32[5] = pCtx->es.Sel;
3450 uStackFrame.pu32[6] = pCtx->ds.Sel;
3451 uStackFrame.pu32[7] = pCtx->fs.Sel;
3452 uStackFrame.pu32[8] = pCtx->gs.Sel;
3453 }
3454 }
3455 else
3456 {
3457 if (fFlags & IEM_XCPT_FLAGS_ERR)
3458 *uStackFrame.pu16++ = uErr;
3459 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3460 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3461 uStackFrame.pu16[2] = fEfl;
3462 uStackFrame.pu16[3] = pCtx->sp;
3463 uStackFrame.pu16[4] = pCtx->ss.Sel;
3464 if (fEfl & X86_EFL_VM)
3465 {
3466 uStackFrame.pu16[1] = pCtx->cs.Sel;
3467 uStackFrame.pu16[5] = pCtx->es.Sel;
3468 uStackFrame.pu16[6] = pCtx->ds.Sel;
3469 uStackFrame.pu16[7] = pCtx->fs.Sel;
3470 uStackFrame.pu16[8] = pCtx->gs.Sel;
3471 }
3472 }
3473 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3474 if (rcStrict != VINF_SUCCESS)
3475 return rcStrict;
3476
3477 /* Mark the selectors 'accessed' (hope this is the correct time). */
3478 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3479 * after pushing the stack frame? (Write protect the gdt + stack to
3480 * find out.) */
3481 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3482 {
3483 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3484 if (rcStrict != VINF_SUCCESS)
3485 return rcStrict;
3486 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3487 }
3488
3489 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3490 {
3491 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3492 if (rcStrict != VINF_SUCCESS)
3493 return rcStrict;
3494 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3495 }
3496
3497 /*
3498 * Start committing the register changes (joins with the DPL=CPL branch).
3499 */
3500 pCtx->ss.Sel = NewSS;
3501 pCtx->ss.ValidSel = NewSS;
3502 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3503 pCtx->ss.u32Limit = cbLimitSS;
3504 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3505 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3506 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3507 pIemCpu->uCpl = uNewCpl;
3508
3509 if (fEfl & X86_EFL_VM)
3510 {
3511 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3512 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3513 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3514 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3515 }
3516 }
3517 /*
3518 * Same privilege, no stack change and smaller stack frame.
3519 */
3520 else
3521 {
3522 uint64_t uNewRsp;
3523 RTPTRUNION uStackFrame;
3524 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3525 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3526 if (rcStrict != VINF_SUCCESS)
3527 return rcStrict;
3528 void * const pvStackFrame = uStackFrame.pv;
3529
3530 if (f32BitGate)
3531 {
3532 if (fFlags & IEM_XCPT_FLAGS_ERR)
3533 *uStackFrame.pu32++ = uErr;
3534 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3535 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3536 uStackFrame.pu32[2] = fEfl;
3537 }
3538 else
3539 {
3540 if (fFlags & IEM_XCPT_FLAGS_ERR)
3541 *uStackFrame.pu16++ = uErr;
3542 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3543 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3544 uStackFrame.pu16[2] = fEfl;
3545 }
3546 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3547 if (rcStrict != VINF_SUCCESS)
3548 return rcStrict;
3549
3550 /* Mark the CS selector as 'accessed'. */
3551 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3552 {
3553 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3554 if (rcStrict != VINF_SUCCESS)
3555 return rcStrict;
3556 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3557 }
3558
3559 /*
3560 * Start committing the register changes (joins with the other branch).
3561 */
3562 pCtx->rsp = uNewRsp;
3563 }
3564
3565 /* ... register committing continues. */
3566 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3567 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3568 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3569 pCtx->cs.u32Limit = cbLimitCS;
3570 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3571 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3572
3573 pCtx->rip = uNewEip;
3574 fEfl &= ~fEflToClear;
3575 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3576
3577 if (fFlags & IEM_XCPT_FLAGS_CR2)
3578 pCtx->cr2 = uCr2;
3579
3580 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3581 iemRaiseXcptAdjustState(pCtx, u8Vector);
3582
3583 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3584}
3585
3586
3587/**
3588 * Implements exceptions and interrupts for long mode.
3589 *
3590 * @returns VBox strict status code.
3591 * @param pIemCpu The IEM per CPU instance data.
3592 * @param pCtx The CPU context.
3593 * @param cbInstr The number of bytes to offset rIP by in the return
3594 * address.
3595 * @param u8Vector The interrupt / exception vector number.
3596 * @param fFlags The flags.
3597 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3598 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3599 */
3600static VBOXSTRICTRC
3601iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3602 PCPUMCTX pCtx,
3603 uint8_t cbInstr,
3604 uint8_t u8Vector,
3605 uint32_t fFlags,
3606 uint16_t uErr,
3607 uint64_t uCr2)
3608{
3609 /*
3610 * Read the IDT entry.
3611 */
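 /* Long mode IDT entries are 16 bytes (two 8-byte halves), hence offset = vector * 16. */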
3612 uint16_t offIdt = (uint16_t)u8Vector << 4;
3613 if (pCtx->idtr.cbIdt < offIdt + 7)
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3616 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3617 }
3618 X86DESC64 Idte;
3619 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3620 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3621 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3622 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3623 return rcStrict;
3624 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3625 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3626 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3627
3628 /*
3629 * Check the descriptor type, DPL and such.
3630 * ASSUMES this is done in the same order as described for call-gate calls.
3631 */
3632 if (Idte.Gate.u1DescType)
3633 {
3634 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3635 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3636 }
3637 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3638 switch (Idte.Gate.u4Type)
3639 {
3640 case AMD64_SEL_TYPE_SYS_INT_GATE:
3641 fEflToClear |= X86_EFL_IF;
3642 break;
3643 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3644 break;
3645
3646 default:
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3648 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3649 }
3650
3651 /* Check DPL against CPL if applicable. */
3652 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3653 {
3654 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3655 {
3656 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3657 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3658 }
3659 }
3660
3661 /* Is it there? */
3662 if (!Idte.Gate.u1Present)
3663 {
3664 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3665 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3666 }
3667
3668 /* A null CS is bad. */
3669 RTSEL NewCS = Idte.Gate.u16Sel;
3670 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3671 {
3672 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3673 return iemRaiseGeneralProtectionFault0(pIemCpu);
3674 }
3675
3676 /* Fetch the descriptor for the new CS. */
3677 IEMSELDESC DescCS;
3678 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3679 if (rcStrict != VINF_SUCCESS)
3680 {
3681 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3682 return rcStrict;
3683 }
3684
3685 /* Must be a 64-bit code segment. */
3686 if (!DescCS.Long.Gen.u1DescType)
3687 {
3688 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3689 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3690 }
3691 if ( !DescCS.Long.Gen.u1Long
3692 || DescCS.Long.Gen.u1DefBig
3693 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3694 {
3695 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3696 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3697 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3698 }
3699
3700 /* Don't allow lowering the privilege level. For non-conforming CS
3701 selectors, the CS.DPL sets the privilege level the trap/interrupt
3702 handler runs at. For conforming CS selectors, the CPL remains
3703 unchanged, but the CS.DPL must be <= CPL. */
3704 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3705 * when CPU in Ring-0. Result \#GP? */
3706 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3707 {
3708 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3709 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3710 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3711 }
3712
3713
3714 /* Make sure the selector is present. */
3715 if (!DescCS.Legacy.Gen.u1Present)
3716 {
3717 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3718 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3719 }
3720
3721 /* Check that the new RIP is canonical. */
3722 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3723 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3724 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3725 if (!IEM_IS_CANONICAL(uNewRip))
3726 {
3727 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3728 return iemRaiseGeneralProtectionFault0(pIemCpu);
3729 }
3730
3731 /*
3732 * If the privilege level changes or if the IST isn't zero, we need to get
3733 * a new stack from the TSS.
3734 */
3735 uint64_t uNewRsp;
3736 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3737 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3738 if ( uNewCpl != pIemCpu->uCpl
3739 || Idte.Gate.u3IST != 0)
3740 {
3741 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3742 if (rcStrict != VINF_SUCCESS)
3743 return rcStrict;
3744 }
3745 else
3746 uNewRsp = pCtx->rsp;
3747 uNewRsp &= ~(uint64_t)0xf;
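 /* In 64-bit mode the CPU aligns the new RSP on a 16-byte boundary before pushing the frame. */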
3748
3749 /*
3750 * Calc the flag image to push.
3751 */
3752 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3753 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3754 fEfl &= ~X86_EFL_RF;
3755 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3756 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3757
3758 /*
3759 * Start making changes.
3760 */
3761
3762 /* Create the stack frame. */
3763 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3764 RTPTRUNION uStackFrame;
3765 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3766 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3767 if (rcStrict != VINF_SUCCESS)
3768 return rcStrict;
3769 void * const pvStackFrame = uStackFrame.pv;
3770
3771 if (fFlags & IEM_XCPT_FLAGS_ERR)
3772 *uStackFrame.pu64++ = uErr;
3773 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3774 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3775 uStackFrame.pu64[2] = fEfl;
3776 uStackFrame.pu64[3] = pCtx->rsp;
3777 uStackFrame.pu64[4] = pCtx->ss.Sel;
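 /* Frame just built, lowest address first: [error code,] RIP, CS, RFLAGS, RSP, SS. */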
3778 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3779 if (rcStrict != VINF_SUCCESS)
3780 return rcStrict;
3781
3782 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3783 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3784 * after pushing the stack frame? (Write protect the gdt + stack to
3785 * find out.) */
3786 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3787 {
3788 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3789 if (rcStrict != VINF_SUCCESS)
3790 return rcStrict;
3791 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3792 }
3793
3794 /*
3795 * Start committing the register changes.
3796 */
3797 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3798 * hidden registers when interrupting 32-bit or 16-bit code! */
3799 if (uNewCpl != pIemCpu->uCpl)
3800 {
3801 pCtx->ss.Sel = 0 | uNewCpl;
3802 pCtx->ss.ValidSel = 0 | uNewCpl;
3803 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3804 pCtx->ss.u32Limit = UINT32_MAX;
3805 pCtx->ss.u64Base = 0;
3806 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3807 }
3808 pCtx->rsp = uNewRsp - cbStackFrame;
3809 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3810 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3811 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3812 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3813 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3814 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3815 pCtx->rip = uNewRip;
3816 pIemCpu->uCpl = uNewCpl;
3817
3818 fEfl &= ~fEflToClear;
3819 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3820
3821 if (fFlags & IEM_XCPT_FLAGS_CR2)
3822 pCtx->cr2 = uCr2;
3823
3824 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3825 iemRaiseXcptAdjustState(pCtx, u8Vector);
3826
3827 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3828}
3829
3830
3831/**
3832 * Implements exceptions and interrupts.
3833 *
3834 * All exceptions and interrupts go thru this function!
3835 *
3836 * @returns VBox strict status code.
3837 * @param pIemCpu The IEM per CPU instance data.
3838 * @param cbInstr The number of bytes to offset rIP by in the return
3839 * address.
3840 * @param u8Vector The interrupt / exception vector number.
3841 * @param fFlags The flags.
3842 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3843 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3844 */
3845DECL_NO_INLINE(static, VBOXSTRICTRC)
3846iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3847 uint8_t cbInstr,
3848 uint8_t u8Vector,
3849 uint32_t fFlags,
3850 uint16_t uErr,
3851 uint64_t uCr2)
3852{
3853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3854#ifdef IN_RING0
3855 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3856 AssertRCReturn(rc, rc);
3857#endif
3858
3859 /*
3860 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3861 */
3862 if ( pCtx->eflags.Bits.u1VM
3863 && pCtx->eflags.Bits.u2IOPL != 3
3864 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3865 && (pCtx->cr0 & X86_CR0_PE) )
3866 {
3867 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3868 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3869 u8Vector = X86_XCPT_GP;
3870 uErr = 0;
3871 }
3872#ifdef DBGFTRACE_ENABLED
3873 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3874 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3875 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3876#endif
3877
3878 /*
3879 * Do recursion accounting.
3880 */
3881 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3882 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3883 if (pIemCpu->cXcptRecursions == 0)
3884 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3885 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3886 else
3887 {
3888 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3889 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3890
3891 /** @todo double and triple faults. */
3892 if (pIemCpu->cXcptRecursions >= 3)
3893 {
3894#ifdef DEBUG_bird
3895 AssertFailed();
3896#endif
3897 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3898 }
3899
3900 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3901 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3902 {
3903 ....
3904 } */
3905 }
3906 pIemCpu->cXcptRecursions++;
3907 pIemCpu->uCurXcpt = u8Vector;
3908 pIemCpu->fCurXcpt = fFlags;
3909
3910 /*
3911 * Extensive logging.
3912 */
3913#if defined(LOG_ENABLED) && defined(IN_RING3)
3914 if (LogIs3Enabled())
3915 {
3916 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3917 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3918 char szRegs[4096];
3919 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3920 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3921 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3922 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3923 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3924 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3925 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3926 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3927 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3928 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3929 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3930 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3931 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3932 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3933 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3934 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3935 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3936 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3937 " efer=%016VR{efer}\n"
3938 " pat=%016VR{pat}\n"
3939 " sf_mask=%016VR{sf_mask}\n"
3940 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3941 " lstar=%016VR{lstar}\n"
3942 " star=%016VR{star} cstar=%016VR{cstar}\n"
3943 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3944 );
3945
3946 char szInstr[256];
3947 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3948 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3949 szInstr, sizeof(szInstr), NULL);
3950 Log3(("%s%s\n", szRegs, szInstr));
3951 }
3952#endif /* LOG_ENABLED */
3953
3954 /*
3955 * Call the mode specific worker function.
3956 */
3957 VBOXSTRICTRC rcStrict;
3958 if (!(pCtx->cr0 & X86_CR0_PE))
3959 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3960 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
3961 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3962 else
3963 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3964
3965 /*
3966 * Unwind.
3967 */
3968 pIemCpu->cXcptRecursions--;
3969 pIemCpu->uCurXcpt = uPrevXcpt;
3970 pIemCpu->fCurXcpt = fPrevXcpt;
3971 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
3972 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
3973 return rcStrict;
3974}
3975
3976
3977/** \#DE - 00. */
3978DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
3979{
3980 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3981}
3982
3983
3984/** \#DB - 01.
3985 * @note This automatically clears DR7.GD. */
3986DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
3987{
3988 /** @todo set/clear RF. */
3989 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
3990 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3991}
3992
3993
3994/** \#UD - 06. */
3995DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
3996{
3997 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3998}
3999
4000
4001/** \#NM - 07. */
4002DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4003{
4004 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4005}
4006
4007
4008/** \#TS(err) - 0a. */
4009DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4010{
4011 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4012}
4013
4014
4015/** \#TS(tr) - 0a. */
4016DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4017{
4018 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4019 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4020}
4021
4022
4023/** \#TS(0) - 0a. */
4024DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4025{
4026 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4027 0, 0);
4028}
4029
4030
4031/** \#TS(err) - 0a. */
4032DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4033{
4034 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4035 uSel & X86_SEL_MASK_OFF_RPL, 0);
4036}
4037
4038
4039/** \#NP(err) - 0b. */
4040DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4041{
4042 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4043}
4044
4045
4046/** \#NP(seg) - 0b. */
4047DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4048{
4049 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4050 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4051}
4052
4053
4054/** \#NP(sel) - 0b. */
4055DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4056{
4057 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4058 uSel & ~X86_SEL_RPL, 0);
4059}
4060
4061
4062/** \#SS(seg) - 0c. */
4063DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4064{
4065 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4066 uSel & ~X86_SEL_RPL, 0);
4067}
4068
4069
4070/** \#SS(err) - 0c. */
4071DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4072{
4073 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4074}
4075
4076
4077/** \#GP(n) - 0d. */
4078DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4079{
4080 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4081}
4082
4083
4084/** \#GP(0) - 0d. */
4085DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4086{
4087 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4088}
4089
4090
4091/** \#GP(sel) - 0d. */
4092DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4093{
4094 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4095 Sel & ~X86_SEL_RPL, 0);
4096}
4097
4098
4099/** \#GP(0) - 0d. */
4100DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4101{
4102 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4103}
4104
4105
4106/** \#GP(sel) - 0d. */
4107DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4108{
4109 NOREF(iSegReg); NOREF(fAccess);
4110 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4111 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4112}
4113
4114
4115/** \#GP(sel) - 0d. */
4116DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4117{
4118 NOREF(Sel);
4119 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4120}
4121
4122
4123/** \#GP(sel) - 0d. */
4124DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4125{
4126 NOREF(iSegReg); NOREF(fAccess);
4127 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4128}
4129
4130
4131/** \#PF(n) - 0e. */
4132DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4133{
4134 uint16_t uErr;
4135 switch (rc)
4136 {
4137 case VERR_PAGE_NOT_PRESENT:
4138 case VERR_PAGE_TABLE_NOT_PRESENT:
4139 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4140 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4141 uErr = 0;
4142 break;
4143
4144 default:
4145 AssertMsgFailed(("%Rrc\n", rc));
4146 case VERR_ACCESS_DENIED:
4147 uErr = X86_TRAP_PF_P;
4148 break;
4149
4150 /** @todo reserved */
4151 }
4152
4153 if (pIemCpu->uCpl == 3)
4154 uErr |= X86_TRAP_PF_US;
4155
4156 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4157 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4158 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4159 uErr |= X86_TRAP_PF_ID;
4160
4161#if 0 /* This is so much non-sense, really. Why was it done like that? */
4162 /* Note! RW access callers reporting a WRITE protection fault, will clear
4163 the READ flag before calling. So, read-modify-write accesses (RW)
4164 can safely be reported as READ faults. */
4165 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4166 uErr |= X86_TRAP_PF_RW;
4167#else
4168 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4169 {
4170 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4171 uErr |= X86_TRAP_PF_RW;
4172 }
4173#endif
4174
4175 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4176 uErr, GCPtrWhere);
4177}
4178
4179
4180/** \#MF(0) - 10. */
4181DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4182{
4183 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4184}
4185
4186
4187/** \#AC(0) - 11. */
4188DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4189{
4190 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4191}
4192
4193
4194/**
4195 * Macro for calling iemCImplRaiseDivideError().
4196 *
4197 * This enables us to add/remove arguments and force different levels of
4198 * inlining as we wish.
4199 *
4200 * @return Strict VBox status code.
4201 */
4202#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4203IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4204{
4205 NOREF(cbInstr);
4206 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4207}
4208
4209
4210/**
4211 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4212 *
4213 * This enables us to add/remove arguments and force different levels of
4214 * inlining as we wish.
4215 *
4216 * @return Strict VBox status code.
4217 */
4218#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4219IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4220{
4221 NOREF(cbInstr);
4222 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4223}
4224
4225
4226/**
4227 * Macro for calling iemCImplRaiseInvalidOpcode().
4228 *
4229 * This enables us to add/remove arguments and force different levels of
4230 * inlining as we wish.
4231 *
4232 * @return Strict VBox status code.
4233 */
4234#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4235IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4236{
4237 NOREF(cbInstr);
4238 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4239}
4240
4241
4242/** @} */
4243
4244
4245/*
4246 *
4247 * Helper routines.
4248 * Helper routines.
4249 * Helper routines.
4250 *
4251 */
4252
4253/**
4254 * Recalculates the effective operand size.
4255 *
4256 * @param pIemCpu The IEM state.
4257 */
4258static void iemRecalEffOpSize(PIEMCPU pIemCpu)
4259{
4260 switch (pIemCpu->enmCpuMode)
4261 {
4262 case IEMMODE_16BIT:
4263 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4264 break;
4265 case IEMMODE_32BIT:
4266 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4267 break;
4268 case IEMMODE_64BIT:
4269 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4270 {
4271 case 0:
4272 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4273 break;
4274 case IEM_OP_PRF_SIZE_OP:
4275 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4276 break;
4277 case IEM_OP_PRF_SIZE_REX_W:
4278 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4279 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4280 break;
4281 }
4282 break;
4283 default:
4284 AssertFailed();
4285 }
4286}
4287
4288
4289/**
4290 * Sets the default operand size to 64-bit and recalculates the effective
4291 * operand size.
4292 *
4293 * @param pIemCpu The IEM state.
4294 */
4295static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4296{
4297 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4298 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4299 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4300 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4301 else
4302 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4303}
4304
4305
4306/*
4307 *
4308 * Common opcode decoders.
4309 * Common opcode decoders.
4310 * Common opcode decoders.
4311 *
4312 */
4313//#include <iprt/mem.h>
4314
4315/**
4316 * Used to add extra details about a stub case.
4317 * @param pIemCpu The IEM per CPU state.
4318 */
4319static void iemOpStubMsg2(PIEMCPU pIemCpu)
4320{
4321#if defined(LOG_ENABLED) && defined(IN_RING3)
4322 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4323 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4324 char szRegs[4096];
4325 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4326 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4327 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4328 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4329 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4330 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4331 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4332 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4333 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4334 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4335 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4336 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4337 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4338 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4339 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4340 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4341 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4342 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4343 " efer=%016VR{efer}\n"
4344 " pat=%016VR{pat}\n"
4345 " sf_mask=%016VR{sf_mask}\n"
4346 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4347 " lstar=%016VR{lstar}\n"
4348 " star=%016VR{star} cstar=%016VR{cstar}\n"
4349 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4350 );
4351
4352 char szInstr[256];
4353 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4354 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4355 szInstr, sizeof(szInstr), NULL);
4356
4357 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4358#else
4359 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4360#endif
4361}
4362
4363/**
4364 * Complains about a stub.
4365 *
4366 * Providing two versions of this macro, one for daily use and one for use when
4367 * working on IEM.
4368 */
4369#if 0
4370# define IEMOP_BITCH_ABOUT_STUB() \
4371 do { \
4372 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4373 iemOpStubMsg2(pIemCpu); \
4374 RTAssertPanic(); \
4375 } while (0)
4376#else
4377# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4378#endif
4379
4380/** Stubs an opcode. */
4381#define FNIEMOP_STUB(a_Name) \
4382 FNIEMOP_DEF(a_Name) \
4383 { \
4384 IEMOP_BITCH_ABOUT_STUB(); \
4385 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4386 } \
4387 typedef int ignore_semicolon
4388
4389/** Stubs an opcode. */
4390#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4391 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4392 { \
4393 IEMOP_BITCH_ABOUT_STUB(); \
4394 NOREF(a_Name0); \
4395 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4396 } \
4397 typedef int ignore_semicolon
4398
4399/** Stubs an opcode which currently should raise \#UD. */
4400#define FNIEMOP_UD_STUB(a_Name) \
4401 FNIEMOP_DEF(a_Name) \
4402 { \
4403 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4404 return IEMOP_RAISE_INVALID_OPCODE(); \
4405 } \
4406 typedef int ignore_semicolon
4407
4408/** Stubs an opcode which currently should raise \#UD. */
4409#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4410 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4411 { \
4412 NOREF(a_Name0); \
4413 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4414 return IEMOP_RAISE_INVALID_OPCODE(); \
4415 } \
4416 typedef int ignore_semicolon
4417
4418
4419
4420/** @name Register Access.
4421 * @{
4422 */
4423
4424/**
4425 * Gets a reference (pointer) to the specified hidden segment register.
4426 *
4427 * @returns Hidden register reference.
4428 * @param pIemCpu The per CPU data.
4429 * @param iSegReg The segment register.
4430 */
4431static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4432{
4433 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4434 PCPUMSELREG pSReg;
4435 switch (iSegReg)
4436 {
4437 case X86_SREG_ES: pSReg = &pCtx->es; break;
4438 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4439 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4440 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4441 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4442 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4443 default:
4444 AssertFailedReturn(NULL);
4445 }
4446#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4447 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4448 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4449#else
4450 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4451#endif
4452 return pSReg;
4453}
4454
4455
4456/**
4457 * Gets a reference (pointer) to the specified segment register (the selector
4458 * value).
4459 *
4460 * @returns Pointer to the selector variable.
4461 * @param pIemCpu The per CPU data.
4462 * @param iSegReg The segment register.
4463 */
4464static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4465{
4466 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4467 switch (iSegReg)
4468 {
4469 case X86_SREG_ES: return &pCtx->es.Sel;
4470 case X86_SREG_CS: return &pCtx->cs.Sel;
4471 case X86_SREG_SS: return &pCtx->ss.Sel;
4472 case X86_SREG_DS: return &pCtx->ds.Sel;
4473 case X86_SREG_FS: return &pCtx->fs.Sel;
4474 case X86_SREG_GS: return &pCtx->gs.Sel;
4475 }
4476 AssertFailedReturn(NULL);
4477}
4478
4479
4480/**
4481 * Fetches the selector value of a segment register.
4482 *
4483 * @returns The selector value.
4484 * @param pIemCpu The per CPU data.
4485 * @param iSegReg The segment register.
4486 */
4487static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4488{
4489 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4490 switch (iSegReg)
4491 {
4492 case X86_SREG_ES: return pCtx->es.Sel;
4493 case X86_SREG_CS: return pCtx->cs.Sel;
4494 case X86_SREG_SS: return pCtx->ss.Sel;
4495 case X86_SREG_DS: return pCtx->ds.Sel;
4496 case X86_SREG_FS: return pCtx->fs.Sel;
4497 case X86_SREG_GS: return pCtx->gs.Sel;
4498 }
4499 AssertFailedReturn(0xffff);
4500}
4501
4502
4503/**
4504 * Gets a reference (pointer) to the specified general register.
4505 *
4506 * @returns Register reference.
4507 * @param pIemCpu The per CPU data.
4508 * @param iReg The general register.
4509 */
4510static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4511{
4512 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4513 switch (iReg)
4514 {
4515 case X86_GREG_xAX: return &pCtx->rax;
4516 case X86_GREG_xCX: return &pCtx->rcx;
4517 case X86_GREG_xDX: return &pCtx->rdx;
4518 case X86_GREG_xBX: return &pCtx->rbx;
4519 case X86_GREG_xSP: return &pCtx->rsp;
4520 case X86_GREG_xBP: return &pCtx->rbp;
4521 case X86_GREG_xSI: return &pCtx->rsi;
4522 case X86_GREG_xDI: return &pCtx->rdi;
4523 case X86_GREG_x8: return &pCtx->r8;
4524 case X86_GREG_x9: return &pCtx->r9;
4525 case X86_GREG_x10: return &pCtx->r10;
4526 case X86_GREG_x11: return &pCtx->r11;
4527 case X86_GREG_x12: return &pCtx->r12;
4528 case X86_GREG_x13: return &pCtx->r13;
4529 case X86_GREG_x14: return &pCtx->r14;
4530 case X86_GREG_x15: return &pCtx->r15;
4531 }
4532 AssertFailedReturn(NULL);
4533}
4534
4535
4536/**
4537 * Gets a reference (pointer) to the specified 8-bit general register.
4538 *
4539 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4540 *
4541 * @returns Register reference.
4542 * @param pIemCpu The per CPU data.
4543 * @param iReg The register.
4544 */
4545static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4546{
4547 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4548 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4549
4550 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4551 if (iReg >= 4)
4552 pu8Reg++;
4553 return pu8Reg;
4554}
4555
4556
4557/**
4558 * Fetches the value of an 8-bit general register.
4559 *
4560 * @returns The register value.
4561 * @param pIemCpu The per CPU data.
4562 * @param iReg The register.
4563 */
4564static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4565{
4566 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4567 return *pbSrc;
4568}
4569
4570
4571/**
4572 * Fetches the value of a 16-bit general register.
4573 *
4574 * @returns The register value.
4575 * @param pIemCpu The per CPU data.
4576 * @param iReg The register.
4577 */
4578static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4579{
4580 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4581}
4582
4583
4584/**
4585 * Fetches the value of a 32-bit general register.
4586 *
4587 * @returns The register value.
4588 * @param pIemCpu The per CPU data.
4589 * @param iReg The register.
4590 */
4591static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4592{
4593 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4594}
4595
4596
4597/**
4598 * Fetches the value of a 64-bit general register.
4599 *
4600 * @returns The register value.
4601 * @param pIemCpu The per CPU data.
4602 * @param iReg The register.
4603 */
4604static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4605{
4606 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4607}
4608
4609
4610/**
4611 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4612 *
4613 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4614 * segment limit.
4615 *
4616 * @param pIemCpu The per CPU data.
4617 * @param offNextInstr The offset of the next instruction.
4618 */
4619static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4620{
4621 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4622 switch (pIemCpu->enmEffOpSize)
4623 {
4624 case IEMMODE_16BIT:
4625 {
4626 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4627 if ( uNewIp > pCtx->cs.u32Limit
4628 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4629 return iemRaiseGeneralProtectionFault0(pIemCpu);
4630 pCtx->rip = uNewIp;
4631 break;
4632 }
4633
4634 case IEMMODE_32BIT:
4635 {
4636 Assert(pCtx->rip <= UINT32_MAX);
4637 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4638
4639 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4640 if (uNewEip > pCtx->cs.u32Limit)
4641 return iemRaiseGeneralProtectionFault0(pIemCpu);
4642 pCtx->rip = uNewEip;
4643 break;
4644 }
4645
4646 case IEMMODE_64BIT:
4647 {
4648 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4649
4650 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4651 if (!IEM_IS_CANONICAL(uNewRip))
4652 return iemRaiseGeneralProtectionFault0(pIemCpu);
4653 pCtx->rip = uNewRip;
4654 break;
4655 }
4656
4657 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4658 }
4659
4660 pCtx->eflags.Bits.u1RF = 0;
4661 return VINF_SUCCESS;
4662}
4663
4664
4665/**
4666 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4667 *
4668 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4669 * segment limit.
4670 *
4671 * @returns Strict VBox status code.
4672 * @param pIemCpu The per CPU data.
4673 * @param offNextInstr The offset of the next instruction.
4674 */
4675static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4676{
4677 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4678 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4679
4680 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4681 if ( uNewIp > pCtx->cs.u32Limit
4682 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4683 return iemRaiseGeneralProtectionFault0(pIemCpu);
4684 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4685 pCtx->rip = uNewIp;
4686 pCtx->eflags.Bits.u1RF = 0;
4687
4688 return VINF_SUCCESS;
4689}
4690
4691
4692/**
4693 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4694 *
4695 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4696 * segment limit.
4697 *
4698 * @returns Strict VBox status code.
4699 * @param pIemCpu The per CPU data.
4700 * @param offNextInstr The offset of the next instruction.
4701 */
4702static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4703{
4704 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4705 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4706
4707 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4708 {
4709 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4710
4711 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4712 if (uNewEip > pCtx->cs.u32Limit)
4713 return iemRaiseGeneralProtectionFault0(pIemCpu);
4714 pCtx->rip = uNewEip;
4715 }
4716 else
4717 {
4718 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4719
4720 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4721 if (!IEM_IS_CANONICAL(uNewRip))
4722 return iemRaiseGeneralProtectionFault0(pIemCpu);
4723 pCtx->rip = uNewRip;
4724 }
4725 pCtx->eflags.Bits.u1RF = 0;
4726 return VINF_SUCCESS;
4727}
4728
4729
4730/**
4731 * Performs a near jump to the specified address.
4732 *
4733 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4734 * segment limit.
4735 *
4736 * @param pIemCpu The per CPU data.
4737 * @param uNewRip The new RIP value.
4738 */
4739static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4740{
4741 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4742 switch (pIemCpu->enmEffOpSize)
4743 {
4744 case IEMMODE_16BIT:
4745 {
4746 Assert(uNewRip <= UINT16_MAX);
4747 if ( uNewRip > pCtx->cs.u32Limit
4748 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4749 return iemRaiseGeneralProtectionFault0(pIemCpu);
4750 /** @todo Test 16-bit jump in 64-bit mode. */
4751 pCtx->rip = uNewRip;
4752 break;
4753 }
4754
4755 case IEMMODE_32BIT:
4756 {
4757 Assert(uNewRip <= UINT32_MAX);
4758 Assert(pCtx->rip <= UINT32_MAX);
4759 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4760
4761 if (uNewRip > pCtx->cs.u32Limit)
4762 return iemRaiseGeneralProtectionFault0(pIemCpu);
4763 pCtx->rip = uNewRip;
4764 break;
4765 }
4766
4767 case IEMMODE_64BIT:
4768 {
4769 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4770
4771 if (!IEM_IS_CANONICAL(uNewRip))
4772 return iemRaiseGeneralProtectionFault0(pIemCpu);
4773 pCtx->rip = uNewRip;
4774 break;
4775 }
4776
4777 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4778 }
4779
4780 pCtx->eflags.Bits.u1RF = 0;
4781 return VINF_SUCCESS;
4782}
4783
4784
4785/**
4786 * Gets the address of the top of the stack.
4787 *
4788 * @param pIemCpu The per CPU data.
4789 * @param pCtx The CPU context from which SP/ESP/RSP should be
4790 * read.
4791 */
4792DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4793{
4794 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4795 return pCtx->rsp;
4796 if (pCtx->ss.Attr.n.u1DefBig)
4797 return pCtx->esp;
4798 return pCtx->sp;
4799}
4800
4801
4802/**
4803 * Updates the RIP/EIP/IP to point to the next instruction.
4804 *
4805 * This function leaves the EFLAGS.RF flag alone.
4806 *
4807 * @param pIemCpu The per CPU data.
4808 * @param cbInstr The number of bytes to add.
4809 */
4810static void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4811{
4812 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4813 switch (pIemCpu->enmCpuMode)
4814 {
4815 case IEMMODE_16BIT:
4816 Assert(pCtx->rip <= UINT16_MAX);
4817 pCtx->eip += cbInstr;
4818 pCtx->eip &= UINT32_C(0xffff);
4819 break;
4820
4821 case IEMMODE_32BIT:
4822 pCtx->eip += cbInstr;
4823 Assert(pCtx->rip <= UINT32_MAX);
4824 break;
4825
4826 case IEMMODE_64BIT:
4827 pCtx->rip += cbInstr;
4828 break;
4829 default: AssertFailed();
4830 }
4831}
4832
4833
4834#if 0
4835/**
4836 * Updates the RIP/EIP/IP to point to the next instruction.
4837 *
4838 * @param pIemCpu The per CPU data.
4839 */
4840static void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4841{
4842 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4843}
4844#endif
4845
4846
4847
4848/**
4849 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4850 *
4851 * @param pIemCpu The per CPU data.
4852 * @param cbInstr The number of bytes to add.
4853 */
4854static void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4855{
4856 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4857
4858 pCtx->eflags.Bits.u1RF = 0;
4859
4860 switch (pIemCpu->enmCpuMode)
4861 {
4862 case IEMMODE_16BIT:
4863 Assert(pCtx->rip <= UINT16_MAX);
4864 pCtx->eip += cbInstr;
4865 pCtx->eip &= UINT32_C(0xffff);
4866 break;
4867
4868 case IEMMODE_32BIT:
4869 pCtx->eip += cbInstr;
4870 Assert(pCtx->rip <= UINT32_MAX);
4871 break;
4872
4873 case IEMMODE_64BIT:
4874 pCtx->rip += cbInstr;
4875 break;
4876 default: AssertFailed();
4877 }
4878}
4879
4880
4881/**
4882 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4883 *
4884 * @param pIemCpu The per CPU data.
4885 */
4886static void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4887{
4888 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4889}
4890
4891
4892/**
4893 * Adds to the stack pointer.
4894 *
4895 * @param pIemCpu The per CPU data.
4896 * @param pCtx The CPU context in which SP/ESP/RSP should be
4897 * updated.
4898 * @param cbToAdd The number of bytes to add.
4899 */
4900DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4901{
4902 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4903 pCtx->rsp += cbToAdd;
4904 else if (pCtx->ss.Attr.n.u1DefBig)
4905 pCtx->esp += cbToAdd;
4906 else
4907 pCtx->sp += cbToAdd;
4908}
4909
4910
4911/**
4912 * Subtracts from the stack pointer.
4913 *
4914 * @param pIemCpu The per CPU data.
4915 * @param pCtx The CPU context in which SP/ESP/RSP should be
4916 * updated.
4917 * @param cbToSub The number of bytes to subtract.
4918 */
4919DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4920{
4921 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4922 pCtx->rsp -= cbToSub;
4923 else if (pCtx->ss.Attr.n.u1DefBig)
4924 pCtx->esp -= cbToSub;
4925 else
4926 pCtx->sp -= cbToSub;
4927}
4928
4929
4930/**
4931 * Adds to the temporary stack pointer.
4932 *
4933 * @param pIemCpu The per CPU data.
4934 * @param pCtx Where to get the current stack mode.
4935 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4936 * @param cbToAdd The number of bytes to add.
4937 */
4938DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4939{
4940 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4941 pTmpRsp->u += cbToAdd;
4942 else if (pCtx->ss.Attr.n.u1DefBig)
4943 pTmpRsp->DWords.dw0 += cbToAdd;
4944 else
4945 pTmpRsp->Words.w0 += cbToAdd;
4946}
4947
4948
4949/**
4950 * Subtracts from the temporary stack pointer.
4951 *
4952 * @param pIemCpu The per CPU data.
4953 * @param pCtx Where to get the current stack mode.
4954 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4955 * @param cbToSub The number of bytes to subtract.
4956 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
4957 * expecting that.
4958 */
4959DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
4960{
4961 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4962 pTmpRsp->u -= cbToSub;
4963 else if (pCtx->ss.Attr.n.u1DefBig)
4964 pTmpRsp->DWords.dw0 -= cbToSub;
4965 else
4966 pTmpRsp->Words.w0 -= cbToSub;
4967}
4968
4969
4970/**
4971 * Calculates the effective stack address for a push of the specified size as
4972 * well as the new RSP value (upper bits may be masked).
4973 *
4974 * @returns Effective stack address for the push.
4975 * @param pIemCpu The IEM per CPU data.
4976 * @param pCtx Where to get the current stack mode.
4977 * @param cbItem The size of the stack item to push.
4978 * @param puNewRsp Where to return the new RSP value.
4979 */
4980DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
4981{
4982 RTUINT64U uTmpRsp;
4983 RTGCPTR GCPtrTop;
4984 uTmpRsp.u = pCtx->rsp;
4985
4986 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4987 GCPtrTop = uTmpRsp.u -= cbItem;
4988 else if (pCtx->ss.Attr.n.u1DefBig)
4989 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
4990 else
4991 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
4992 *puNewRsp = uTmpRsp.u;
4993 return GCPtrTop;
4994}
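
/* Illustrative example, not part of the build (the ESP/SP values below are
   assumptions picked for the example):

       With a 32-bit stack (SS.Attr.n.u1DefBig=1) and ESP=0x00001000, pushing a
       4-byte item gives:
           uint64_t uNewRsp;
           RTGCPTR  GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
           // GCPtrTop == 0x00000ffc, low dword of uNewRsp == 0x00000ffc,
           // upper dword of RSP preserved.

       With a 16-bit stack and SP=0x0002 the subtraction wraps inside Words.w0,
       so GCPtrTop == 0xfffe. */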
4995
4996
4997/**
4998 * Gets the current stack pointer and calculates the value after a pop of the
4999 * specified size.
5000 *
5001 * @returns Current stack pointer.
5002 * @param pIemCpu The per CPU data.
5003 * @param pCtx Where to get the current stack mode.
5004 * @param cbItem The size of the stack item to pop.
5005 * @param puNewRsp Where to return the new RSP value.
5006 */
5007DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5008{
5009 RTUINT64U uTmpRsp;
5010 RTGCPTR GCPtrTop;
5011 uTmpRsp.u = pCtx->rsp;
5012
5013 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5014 {
5015 GCPtrTop = uTmpRsp.u;
5016 uTmpRsp.u += cbItem;
5017 }
5018 else if (pCtx->ss.Attr.n.u1DefBig)
5019 {
5020 GCPtrTop = uTmpRsp.DWords.dw0;
5021 uTmpRsp.DWords.dw0 += cbItem;
5022 }
5023 else
5024 {
5025 GCPtrTop = uTmpRsp.Words.w0;
5026 uTmpRsp.Words.w0 += cbItem;
5027 }
5028 *puNewRsp = uTmpRsp.u;
5029 return GCPtrTop;
5030}
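
/* Illustrative example (assumed values): a 4-byte pop on a 16-bit stack with
   SP=0xfffe returns the current top of stack and wraps the new SP:
       uint64_t uNewRsp;
       RTGCPTR  GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
       // GCPtrTop == 0xfffe, (uint16_t)uNewRsp == 0x0002.
   Unlike the push case, the address returned is the pre-increment stack
   pointer, i.e. where the item to pop currently lives. */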
5031
5032
5033/**
5034 * Calculates the effective stack address for a push of the specified size as
5035 * well as the new temporary RSP value (upper bits may be masked).
5036 *
5037 * @returns Effective stack address for the push.
5038 * @param pIemCpu The per CPU data.
5039 * @param pCtx Where to get the current stack mode.
5040 * @param pTmpRsp The temporary stack pointer. This is updated.
5041 * @param cbItem The size of the stack item to push.
5042 */
5043DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5044{
5045 RTGCPTR GCPtrTop;
5046
5047 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5048 GCPtrTop = pTmpRsp->u -= cbItem;
5049 else if (pCtx->ss.Attr.n.u1DefBig)
5050 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5051 else
5052 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5053 return GCPtrTop;
5054}
5055
5056
5057/**
5058 * Gets the effective stack address for a pop of the specified size and
5059 * calculates and updates the temporary RSP.
5060 *
5061 * @returns Current stack pointer.
5062 * @param pIemCpu The per CPU data.
5063 * @param pCtx Where to get the current stack mode.
5064 * @param pTmpRsp The temporary stack pointer. This is updated.
5065 * @param cbItem The size of the stack item to pop.
5066 */
5067DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5068{
5069 RTGCPTR GCPtrTop;
5070 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5071 {
5072 GCPtrTop = pTmpRsp->u;
5073 pTmpRsp->u += cbItem;
5074 }
5075 else if (pCtx->ss.Attr.n.u1DefBig)
5076 {
5077 GCPtrTop = pTmpRsp->DWords.dw0;
5078 pTmpRsp->DWords.dw0 += cbItem;
5079 }
5080 else
5081 {
5082 GCPtrTop = pTmpRsp->Words.w0;
5083 pTmpRsp->Words.w0 += cbItem;
5084 }
5085 return GCPtrTop;
5086}
5087
5088/** @} */
5089
5090
5091/** @name FPU access and helpers.
5092 *
5093 * @{
5094 */
5095
5096
5097/**
5098 * Hook for preparing to use the host FPU.
5099 *
5100 * This is necessary in ring-0 and raw-mode context.
5101 *
5102 * @param pIemCpu The IEM per CPU data.
5103 */
5104DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5105{
5106#ifdef IN_RING3
5107 NOREF(pIemCpu);
5108#else
5109/** @todo RZ: FIXME */
5110//# error "Implement me"
5111#endif
5112}
5113
5114
5115/**
5116 * Hook for preparing to use the host FPU for SSE
5117 *
5118 * This is necessary in ring-0 and raw-mode context.
5119 *
5120 * @param pIemCpu The IEM per CPU data.
5121 */
5122DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5123{
5124 iemFpuPrepareUsage(pIemCpu);
5125}
5126
5127
5128/**
5129 * Stores a QNaN value into a FPU register.
5130 *
5131 * @param pReg Pointer to the register.
5132 */
5133DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5134{
5135 pReg->au32[0] = UINT32_C(0x00000000);
5136 pReg->au32[1] = UINT32_C(0xc0000000);
5137 pReg->au16[4] = UINT16_C(0xffff);
5138}
5139
5140
5141/**
5142 * Updates the FOP, FPU.CS and FPUIP registers.
5143 *
5144 * @param pIemCpu The IEM per CPU data.
5145 * @param pCtx The CPU context.
5146 * @param pFpuCtx The FPU context.
5147 */
5148DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5149{
5150 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5151 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5152 /** @todo x87.CS and FPUIP need to be kept separately. */
5153 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5154 {
5155 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5156 * happens in real mode here based on the fnsave and fnstenv images. */
5157 pFpuCtx->CS = 0;
5158 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5159 }
5160 else
5161 {
5162 pFpuCtx->CS = pCtx->cs.Sel;
5163 pFpuCtx->FPUIP = pCtx->rip;
5164 }
5165}
5166
5167
5168/**
5169 * Updates the x87.DS and FPUDP registers.
5170 *
5171 * @param pIemCpu The IEM per CPU data.
5172 * @param pCtx The CPU context.
5173 * @param pFpuCtx The FPU context.
5174 * @param iEffSeg The effective segment register.
5175 * @param GCPtrEff The effective address relative to @a iEffSeg.
5176 */
5177DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5178{
5179 RTSEL sel;
5180 switch (iEffSeg)
5181 {
5182 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5183 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5184 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5185 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5186 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5187 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5188 default:
5189 AssertMsgFailed(("%d\n", iEffSeg));
5190 sel = pCtx->ds.Sel;
5191 }
5192 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5193 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5194 {
5195 pFpuCtx->DS = 0;
5196 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5197 }
5198 else
5199 {
5200 pFpuCtx->DS = sel;
5201 pFpuCtx->FPUDP = GCPtrEff;
5202 }
5203}
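
/* Illustrative example (assumed selector/offset values): in real and V8086
   mode the data pointer is stored as a linear-style address, selector shifted
   left four bits and OR'ed with the offset, while the DS field is zeroed.
   For DS=0x1234 and GCPtrEff=0x0010:
       // FPUDP == 0x0010 | (0x1234 << 4) == 0x12350, DS field == 0.
   In protected mode the two are kept apart: DS == 0x1234, FPUDP == 0x0010. */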
5204
5205
5206/**
5207 * Rotates the stack registers in the push direction.
5208 *
5209 * @param pFpuCtx The FPU context.
5210 * @remarks This is a complete waste of time, but fxsave stores the registers in
5211 * stack order.
5212 */
5213DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5214{
5215 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5216 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5217 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5218 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5219 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5220 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5221 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5222 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5223 pFpuCtx->aRegs[0].r80 = r80Tmp;
5224}
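
/* Illustrative example: fxsave keeps the registers in ST() order, so after a
   push the array must be rotated to keep aRegs[i] aliased to ST(i). Assuming
   aRegs[] == {A,B,C,D,E,F,G,-} with A == ST(0), and the caller has just
   written the new value N into aRegs[7]:
       iemFpuRotateStackPush(pFpuCtx);
       // aRegs[] == {N,A,B,C,D,E,F,G}: N is the new ST(0), A the new ST(1). */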
5225
5226
5227/**
5228 * Rotates the stack registers in the pop direction.
5229 *
5230 * @param pFpuCtx The FPU context.
5231 * @remarks This is a complete waste of time, but fxsave stores the registers in
5232 * stack order.
5233 */
5234DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5235{
5236 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5237 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5238 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5239 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5240 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5241 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5242 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5243 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5244 pFpuCtx->aRegs[7].r80 = r80Tmp;
5245}
5246
5247
5248/**
5249 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5250 * exception prevents it.
5251 *
5252 * @param pIemCpu The IEM per CPU data.
5253 * @param pResult The FPU operation result to push.
5254 * @param pFpuCtx The FPU context.
5255 */
5256static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5257{
5258 /* Update FSW and bail if there are pending exceptions afterwards. */
5259 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5260 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5261 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5262 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5263 {
5264 pFpuCtx->FSW = fFsw;
5265 return;
5266 }
5267
5268 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5269 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5270 {
5271 /* All is fine, push the actual value. */
5272 pFpuCtx->FTW |= RT_BIT(iNewTop);
5273 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5274 }
5275 else if (pFpuCtx->FCW & X86_FCW_IM)
5276 {
5277 /* Masked stack overflow, push QNaN. */
5278 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5279 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5280 }
5281 else
5282 {
5283 /* Raise stack overflow, don't push anything. */
5284 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5285 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5286 return;
5287 }
5288
5289 fFsw &= ~X86_FSW_TOP_MASK;
5290 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5291 pFpuCtx->FSW = fFsw;
5292
5293 iemFpuRotateStackPush(pFpuCtx);
5294}
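
/* Illustrative note on the TOP arithmetic above: the TOP field is three bits
   wide, so adding 7 and masking decrements it modulo 8:
       // TOP == 0  ->  iNewTop == 7;   TOP == 3  ->  iNewTop == 2.
   The push is treated as a stack overflow when the register just below the
   current top (FTW bit iNewTop) is already marked in use. */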
5295
5296
5297/**
5298 * Stores a result in a FPU register and updates the FSW and FTW.
5299 *
5300 * @param pFpuCtx The FPU context.
5301 * @param pResult The result to store.
5302 * @param iStReg Which FPU register to store it in.
5303 */
5304static void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5305{
5306 Assert(iStReg < 8);
5307 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5308 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5309 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5310 pFpuCtx->FTW |= RT_BIT(iReg);
5311 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5312}
5313
5314
5315/**
5316 * Only updates the FPU status word (FSW) with the result of the current
5317 * instruction.
5318 *
5319 * @param pFpuCtx The FPU context.
5320 * @param u16FSW The FSW output of the current instruction.
5321 */
5322static void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5323{
5324 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5325 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5326}
5327
5328
5329/**
5330 * Pops one item off the FPU stack if no pending exception prevents it.
5331 *
5332 * @param pFpuCtx The FPU context.
5333 */
5334static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5335{
5336 /* Check pending exceptions. */
5337 uint16_t uFSW = pFpuCtx->FSW;
5338 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5339 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5340 return;
5341
5342 /* TOP--. */
5343 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5344 uFSW &= ~X86_FSW_TOP_MASK;
5345 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5346 pFpuCtx->FSW = uFSW;
5347
5348 /* Mark the previous ST0 as empty. */
5349 iOldTop >>= X86_FSW_TOP_SHIFT;
5350 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5351
5352 /* Rotate the registers. */
5353 iemFpuRotateStackPop(pFpuCtx);
5354}
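
/* Illustrative note: adding 9 to the in-place TOP field and masking increments
   TOP modulo 8, since 9 == 1 (mod 8):
       // TOP == 7: (7 + 9) & 7 == 0;   TOP == 2: (2 + 9) & 7 == 3.
   The register that was ST(0) is marked empty in FTW before the register
   array is rotated back into ST() order. */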
5355
5356
5357/**
5358 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5359 *
5360 * @param pIemCpu The IEM per CPU data.
5361 * @param pResult The FPU operation result to push.
5362 */
5363static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5364{
5365 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5366 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5367 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5368 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5369}
5370
5371
5372/**
5373 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5374 * and sets FPUDP and FPUDS.
5375 *
5376 * @param pIemCpu The IEM per CPU data.
5377 * @param pResult The FPU operation result to push.
5378 * @param iEffSeg The effective segment register.
5379 * @param GCPtrEff The effective address relative to @a iEffSeg.
5380 */
5381static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5382{
5383 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5384 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5385 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5386 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5387 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5388}
5389
5390
5391/**
5392 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5393 * unless a pending exception prevents it.
5394 *
5395 * @param pIemCpu The IEM per CPU data.
5396 * @param pResult The FPU operation result to store and push.
5397 */
5398static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5399{
5400 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5401 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5402 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5403
5404 /* Update FSW and bail if there are pending exceptions afterwards. */
5405 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5406 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5407 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5408 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5409 {
5410 pFpuCtx->FSW = fFsw;
5411 return;
5412 }
5413
5414 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5415 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5416 {
5417 /* All is fine, push the actual value. */
5418 pFpuCtx->FTW |= RT_BIT(iNewTop);
5419 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5420 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5421 }
5422 else if (pFpuCtx->FCW & X86_FCW_IM)
5423 {
5424 /* Masked stack overflow, push QNaN. */
5425 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5426 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5427 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5428 }
5429 else
5430 {
5431 /* Raise stack overflow, don't push anything. */
5432 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5433 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5434 return;
5435 }
5436
5437 fFsw &= ~X86_FSW_TOP_MASK;
5438 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5439 pFpuCtx->FSW = fFsw;
5440
5441 iemFpuRotateStackPush(pFpuCtx);
5442}
5443
5444
5445/**
5446 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5447 * FOP.
5448 *
5449 * @param pIemCpu The IEM per CPU data.
5450 * @param pResult The result to store.
5451 * @param iStReg Which FPU register to store it in.
5453 */
5454static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5455{
5456 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5457 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5458 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5459 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5460}
5461
5462
5463/**
5464 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5465 * FOP, and then pops the stack.
5466 *
5467 * @param pIemCpu The IEM per CPU data.
5468 * @param pResult The result to store.
5469 * @param iStReg Which FPU register to store it in.
5471 */
5472static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5473{
5474 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5475 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5476 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5477 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5478 iemFpuMaybePopOne(pFpuCtx);
5479}
5480
5481
5482/**
5483 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5484 * FPUDP, and FPUDS.
5485 *
5486 * @param pIemCpu The IEM per CPU data.
5487 * @param pResult The result to store.
5488 * @param iStReg Which FPU register to store it in.
5490 * @param iEffSeg The effective memory operand selector register.
5491 * @param GCPtrEff The effective memory operand offset.
5492 */
5493static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5494{
5495 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5496 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5497 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5498 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5499 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5500}
5501
5502
5503/**
5504 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5505 * FPUDP, and FPUDS, and then pops the stack.
5506 *
5507 * @param pIemCpu The IEM per CPU data.
5508 * @param pResult The result to store.
5509 * @param iStReg Which FPU register to store it in.
5511 * @param iEffSeg The effective memory operand selector register.
5512 * @param GCPtrEff The effective memory operand offset.
5513 */
5514static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5515 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5516{
5517 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5518 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5519 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5520 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5521 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5522 iemFpuMaybePopOne(pFpuCtx);
5523}
5524
5525
5526/**
5527 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5528 *
5529 * @param pIemCpu The IEM per CPU data.
5530 */
5531static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5532{
5533 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5534 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5535 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5536}
5537
5538
5539/**
5540 * Marks the specified stack register as free (for FFREE).
5541 *
5542 * @param pIemCpu The IEM per CPU data.
5543 * @param iStReg The register to free.
5544 */
5545static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5546{
5547 Assert(iStReg < 8);
5548 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5549 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5550 pFpuCtx->FTW &= ~RT_BIT(iReg);
5551}
5552
5553
5554/**
5555 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5556 *
5557 * @param pIemCpu The IEM per CPU data.
5558 */
5559static void iemFpuStackIncTop(PIEMCPU pIemCpu)
5560{
5561 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5562 uint16_t uFsw = pFpuCtx->FSW;
5563 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5564 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5565 uFsw &= ~X86_FSW_TOP_MASK;
5566 uFsw |= uTop;
5567 pFpuCtx->FSW = uFsw;
5568}
5569
5570
5571/**
5572 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5573 *
5574 * @param pIemCpu The IEM per CPU data.
5575 */
5576static void iemFpuStackDecTop(PIEMCPU pIemCpu)
5577{
5578 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5579 uint16_t uFsw = pFpuCtx->FSW;
5580 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5581 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5582 uFsw &= ~X86_FSW_TOP_MASK;
5583 uFsw |= uTop;
5584 pFpuCtx->FSW = uFsw;
5585}
5586
5587
5588/**
5589 * Updates the FSW, FOP, FPUIP, and FPUCS.
5590 *
5591 * @param pIemCpu The IEM per CPU data.
5592 * @param u16FSW The FSW from the current instruction.
5593 */
5594static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5595{
5596 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5597 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5598 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5599 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5600}
5601
5602
5603/**
5604 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5605 *
5606 * @param pIemCpu The IEM per CPU data.
5607 * @param u16FSW The FSW from the current instruction.
5608 */
5609static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5610{
5611 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5612 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5613 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5614 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5615 iemFpuMaybePopOne(pFpuCtx);
5616}
5617
5618
5619/**
5620 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5621 *
5622 * @param pIemCpu The IEM per CPU data.
5623 * @param u16FSW The FSW from the current instruction.
5624 * @param iEffSeg The effective memory operand selector register.
5625 * @param GCPtrEff The effective memory operand offset.
5626 */
5627static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5628{
5629 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5630 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5631 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5632 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5633 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5634}
5635
5636
5637/**
5638 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5639 *
5640 * @param pIemCpu The IEM per CPU data.
5641 * @param u16FSW The FSW from the current instruction.
5642 */
5643static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5644{
5645 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5646 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5647 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5648 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5649 iemFpuMaybePopOne(pFpuCtx);
5650 iemFpuMaybePopOne(pFpuCtx);
5651}
5652
5653
5654/**
5655 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5656 *
5657 * @param pIemCpu The IEM per CPU data.
5658 * @param u16FSW The FSW from the current instruction.
5659 * @param iEffSeg The effective memory operand selector register.
5660 * @param GCPtrEff The effective memory operand offset.
5661 */
5662static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5663{
5664 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5665 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5666 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5667 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5668 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5669 iemFpuMaybePopOne(pFpuCtx);
5670}
5671
5672
5673/**
5674 * Worker routine for raising an FPU stack underflow exception.
5675 *
5676 * @param pIemCpu The IEM per CPU data.
5677 * @param pFpuCtx The FPU context.
5678 * @param iStReg The stack register being accessed.
5679 */
5680static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5681{
5682 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5683 if (pFpuCtx->FCW & X86_FCW_IM)
5684 {
5685 /* Masked underflow. */
5686 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5687 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5688 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5689 if (iStReg != UINT8_MAX)
5690 {
5691 pFpuCtx->FTW |= RT_BIT(iReg);
5692 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5693 }
5694 }
5695 else
5696 {
5697 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5698 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5699 }
5700}
5701
5702
5703/**
5704 * Raises a FPU stack underflow exception.
5705 *
5706 * @param pIemCpu The IEM per CPU data.
5707 * @param iStReg The destination register that should be loaded
5708 * with QNaN if \#IS is not masked. Specify
5709 * UINT8_MAX if none (like for fcom).
5710 */
5711DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5712{
5713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5714 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5715 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5716 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5717}
5718
5719
5720DECL_NO_INLINE(static, void)
5721iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5722{
5723 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5724 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5725 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5726 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5727 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5728}
5729
5730
5731DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5732{
5733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5734 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5735 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5736 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5737 iemFpuMaybePopOne(pFpuCtx);
5738}
5739
5740
5741DECL_NO_INLINE(static, void)
5742iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5743{
5744 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5745 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5746 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5747 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5748 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5749 iemFpuMaybePopOne(pFpuCtx);
5750}
5751
5752
5753DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5754{
5755 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5756 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5757 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5758 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5759 iemFpuMaybePopOne(pFpuCtx);
5760 iemFpuMaybePopOne(pFpuCtx);
5761}
5762
5763
5764DECL_NO_INLINE(static, void)
5765iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5766{
5767 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5768 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5769 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5770
5771 if (pFpuCtx->FCW & X86_FCW_IM)
5772 {
5773 /* Masked underflow - Push QNaN. */
5774 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5775 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5776 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5777 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5778 pFpuCtx->FTW |= RT_BIT(iNewTop);
5779 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5780 iemFpuRotateStackPush(pFpuCtx);
5781 }
5782 else
5783 {
5784 /* Exception pending - don't change TOP or the register stack. */
5785 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5786 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5787 }
5788}
5789
5790
5791DECL_NO_INLINE(static, void)
5792iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5793{
5794 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5795 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5796 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5797
5798 if (pFpuCtx->FCW & X86_FCW_IM)
5799 {
5800 /* Masked underflow - Push QNaN. */
5801 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5802 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5803 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5804 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5805 pFpuCtx->FTW |= RT_BIT(iNewTop);
5806 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5807 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5808 iemFpuRotateStackPush(pFpuCtx);
5809 }
5810 else
5811 {
5812 /* Exception pending - don't change TOP or the register stack. */
5813 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5814 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5815 }
5816}
5817
5818
5819/**
5820 * Worker routine for raising an FPU stack overflow exception on a push.
5821 *
5822 * @param pFpuCtx The FPU context.
5823 */
5824static void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5825{
5826 if (pFpuCtx->FCW & X86_FCW_IM)
5827 {
5828 /* Masked overflow. */
5829 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5830 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5831 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5832 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5833 pFpuCtx->FTW |= RT_BIT(iNewTop);
5834 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5835 iemFpuRotateStackPush(pFpuCtx);
5836 }
5837 else
5838 {
5839 /* Exception pending - don't change TOP or the register stack. */
5840 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5841 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5842 }
5843}
5844
5845
5846/**
5847 * Raises a FPU stack overflow exception on a push.
5848 *
5849 * @param pIemCpu The IEM per CPU data.
5850 */
5851DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5852{
5853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5854 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5855 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5856 iemFpuStackPushOverflowOnly(pFpuCtx);
5857}
5858
5859
5860/**
5861 * Raises a FPU stack overflow exception on a push with a memory operand.
5862 *
5863 * @param pIemCpu The IEM per CPU data.
5864 * @param iEffSeg The effective memory operand selector register.
5865 * @param GCPtrEff The effective memory operand offset.
5866 */
5867DECL_NO_INLINE(static, void)
5868iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5869{
5870 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5871 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5872 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5873 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5874 iemFpuStackPushOverflowOnly(pFpuCtx);
5875}
5876
5877
5878static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5879{
5880 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5881 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5882 if (pFpuCtx->FTW & RT_BIT(iReg))
5883 return VINF_SUCCESS;
5884 return VERR_NOT_FOUND;
5885}
5886
5887
5888static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5889{
5890 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5891 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5892 if (pFpuCtx->FTW & RT_BIT(iReg))
5893 {
5894 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5895 return VINF_SUCCESS;
5896 }
5897 return VERR_NOT_FOUND;
5898}
5899
5900
5901static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5902 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5903{
5904 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5905 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5906 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5907 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5908 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5909 {
5910 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5911 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5912 return VINF_SUCCESS;
5913 }
5914 return VERR_NOT_FOUND;
5915}
5916
5917
5918static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5919{
5920 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5921 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5922 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5923 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5924 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5925 {
5926 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5927 return VINF_SUCCESS;
5928 }
5929 return VERR_NOT_FOUND;
5930}
5931
5932
5933/**
5934 * Updates the FPU exception status after FCW is changed.
5935 *
5936 * @param pFpuCtx The FPU context.
5937 */
5938static void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5939{
5940 uint16_t u16Fsw = pFpuCtx->FSW;
5941 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5942 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5943 else
5944 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5945 pFpuCtx->FSW = u16Fsw;
5946}
5947
5948
5949/**
5950 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
5951 *
5952 * @returns The full FTW.
5953 * @param pFpuCtx The FPU context.
5954 */
5955static uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
5956{
5957 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
5958 uint16_t u16Ftw = 0;
5959 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5960 for (unsigned iSt = 0; iSt < 8; iSt++)
5961 {
5962 unsigned const iReg = (iSt + iTop) & 7;
5963 if (!(u8Ftw & RT_BIT(iReg)))
5964 u16Ftw |= 3 << (iReg * 2); /* empty */
5965 else
5966 {
5967 uint16_t uTag;
5968 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
5969 if (pr80Reg->s.uExponent == 0x7fff)
5970 uTag = 2; /* Exponent is all 1's => Special. */
5971 else if (pr80Reg->s.uExponent == 0x0000)
5972 {
5973 if (pr80Reg->s.u64Mantissa == 0x0000)
5974 uTag = 1; /* All bits are zero => Zero. */
5975 else
5976 uTag = 2; /* Must be special. */
5977 }
5978 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
5979 uTag = 0; /* Valid. */
5980 else
5981 uTag = 2; /* Must be special. */
5982
5983 u16Ftw |= uTag << (iReg * 2);
5984 }
5985 }
5986
5987 return u16Ftw;
5988}
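
/* Illustrative example (assumed register contents): with TOP=6 and only ST(0)
   and ST(1) in use, holding 1.0 (J bit set) and +0.0 respectively:
       // physical reg 6 (ST0): tag 0 (valid), physical reg 7 (ST1): tag 1 (zero),
       // physical regs 0..5:   tag 3 (empty)
       // => u16Ftw == 0x4fff   (binary 01 00 11 11 11 11 11 11, reg 7..0). */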
5989
5990
5991/**
5992 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
5993 *
5994 * @returns The compressed FTW.
5995 * @param u16FullFtw The full FTW to convert.
5996 */
5997static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
5998{
5999 uint8_t u8Ftw = 0;
6000 for (unsigned i = 0; i < 8; i++)
6001 {
6002 if ((u16FullFtw & 3) != 3 /*empty*/)
6003 u8Ftw |= RT_BIT(i);
6004 u16FullFtw >>= 2;
6005 }
6006
6007 return u8Ftw;
6008}
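
/* Illustrative example: the compression only records empty vs. in-use, so the
   full tag word from the previous example compresses as:
       // iemFpuCompressFtw(0x4fff) == 0xc0
   i.e. bits 6 and 7 set, matching the abridged tag format fxsave uses. */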
6009
6010/** @} */
6011
6012
6013/** @name Memory access.
6014 *
6015 * @{
6016 */
6017
6018
6019/**
6020 * Updates the IEMCPU::cbWritten counter if applicable.
6021 *
6022 * @param pIemCpu The IEM per CPU data.
6023 * @param fAccess The access being accounted for.
6024 * @param cbMem The access size.
6025 */
6026DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6027{
6028 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6029 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6030 pIemCpu->cbWritten += (uint32_t)cbMem;
6031}
6032
6033
6034/**
6035 * Checks if the given segment can be written to, raising the appropriate
6036 * exception if not.
6037 *
6038 * @returns VBox strict status code.
6039 *
6040 * @param pIemCpu The IEM per CPU data.
6041 * @param pHid Pointer to the hidden register.
6042 * @param iSegReg The register number.
6043 * @param pu64BaseAddr Where to return the base address to use for the
6044 * segment. (In 64-bit code it may differ from the
6045 * base in the hidden segment.)
6046 */
6047static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6048{
6049 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6050 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6051 else
6052 {
6053 if (!pHid->Attr.n.u1Present)
6054 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6055
6056 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6057 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6058 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6059 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6060 *pu64BaseAddr = pHid->u64Base;
6061 }
6062 return VINF_SUCCESS;
6063}
6064
6065
6066/**
6067 * Checks if the given segment can be read from, raising the appropriate
6068 * exception if not.
6069 *
6070 * @returns VBox strict status code.
6071 *
6072 * @param pIemCpu The IEM per CPU data.
6073 * @param pHid Pointer to the hidden register.
6074 * @param iSegReg The register number.
6075 * @param pu64BaseAddr Where to return the base address to use for the
6076 * segment. (In 64-bit code it may differ from the
6077 * base in the hidden segment.)
6078 */
6079static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6080{
6081 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6082 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6083 else
6084 {
6085 if (!pHid->Attr.n.u1Present)
6086 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6087
6088 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6089 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6090 *pu64BaseAddr = pHid->u64Base;
6091 }
6092 return VINF_SUCCESS;
6093}
6094
6095
6096/**
6097 * Applies the segment limit, base and attributes.
6098 *
6099 * This may raise a \#GP or \#SS.
6100 *
6101 * @returns VBox strict status code.
6102 *
6103 * @param pIemCpu The IEM per CPU data.
6104 * @param fAccess The kind of access which is being performed.
6105 * @param iSegReg The index of the segment register to apply.
6106 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6107 * TSS, ++).
6108 * @param pGCPtrMem Pointer to the guest memory address to apply
6109 * segmentation to. Input and output parameter.
6110 */
6111static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
6112 size_t cbMem, PRTGCPTR pGCPtrMem)
6113{
6114 if (iSegReg == UINT8_MAX)
6115 return VINF_SUCCESS;
6116
6117 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6118 switch (pIemCpu->enmCpuMode)
6119 {
6120 case IEMMODE_16BIT:
6121 case IEMMODE_32BIT:
6122 {
6123 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6124 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6125
6126 Assert(pSel->Attr.n.u1Present);
6127 Assert(pSel->Attr.n.u1DescType);
6128 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6129 {
6130 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6131 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6132 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6133
6134 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6135 {
6136 /** @todo CPL check. */
6137 }
6138
6139 /*
6140 * There are two kinds of data selectors, normal and expand down.
6141 */
6142 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6143 {
6144 if ( GCPtrFirst32 > pSel->u32Limit
6145 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6146 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6147 }
6148 else
6149 {
6150 /*
6151 * The upper boundary is defined by the B bit, not the G bit!
6152 */
6153 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6154 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6155 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6156 }
6157 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6158 }
6159 else
6160 {
6161
6162 /*
6163 * A code selector can usually be used to read through it; writing is
6164 * only permitted in real and V8086 mode.
6165 */
6166 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6167 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6168 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6169 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6170 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6171
6172 if ( GCPtrFirst32 > pSel->u32Limit
6173 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6174 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6175
6176 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6177 {
6178 /** @todo CPL check. */
6179 }
6180
6181 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6182 }
6183 return VINF_SUCCESS;
6184 }
6185
6186 case IEMMODE_64BIT:
6187 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6188 *pGCPtrMem += pSel->u64Base;
6189 return VINF_SUCCESS;
6190
6191 default:
6192 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
6193 }
6194}
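
/* Illustrative example (assumed descriptor values): for an expand-down data
   segment with u32Limit=0x0fff and the big (B) bit set, the valid offsets are
   0x1000..0xffffffff:
       // a 4-byte access at 0x00000800 fails the "GCPtrFirst32 < u32Limit + 1"
       // check above and raises #GP/#SS; one at 0x00002000 passes and then has
       // the segment base added.
   For a normal expand-up segment the same limit means the opposite: only
   offsets 0..0x0fff are accessible. */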
6195
6196
6197/**
6198 * Translates a virtual address to a physical address and checks if we
6199 * can access the page as specified.
6200 *
6201 * @param pIemCpu The IEM per CPU data.
6202 * @param GCPtrMem The virtual address.
6203 * @param fAccess The intended access.
6204 * @param pGCPhysMem Where to return the physical address.
6205 */
6206static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
6207 PRTGCPHYS pGCPhysMem)
6208{
6209 /** @todo Need a different PGM interface here. We're currently using
6210 * generic / REM interfaces. This won't cut it for R0 & RC. */
6211 RTGCPHYS GCPhys;
6212 uint64_t fFlags;
6213 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6214 if (RT_FAILURE(rc))
6215 {
6216 /** @todo Check unassigned memory in unpaged mode. */
6217 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6218 *pGCPhysMem = NIL_RTGCPHYS;
6219 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6220 }
6221
6222 /* If the page is writable and does not have the no-exec bit set, all
6223 access is allowed. Otherwise we'll have to check more carefully... */
6224 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6225 {
6226 /* Write to read only memory? */
6227 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6228 && !(fFlags & X86_PTE_RW)
6229 && ( pIemCpu->uCpl != 0
6230 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6231 {
6232 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6233 *pGCPhysMem = NIL_RTGCPHYS;
6234 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6235 }
6236
6237 /* Kernel memory accessed by userland? */
6238 if ( !(fFlags & X86_PTE_US)
6239 && pIemCpu->uCpl == 3
6240 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6241 {
6242 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6243 *pGCPhysMem = NIL_RTGCPHYS;
6244 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6245 }
6246
6247 /* Executing non-executable memory? */
6248 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6249 && (fFlags & X86_PTE_PAE_NX)
6250 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6251 {
6252 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6253 *pGCPhysMem = NIL_RTGCPHYS;
6254 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6255 VERR_ACCESS_DENIED);
6256 }
6257 }
6258
6259 /*
6260 * Set the dirty / access flags.
6261 * ASSUMES this is set when the address is translated rather than on commit...
6262 */
6263 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6264 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6265 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6266 {
6267 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6268 AssertRC(rc2);
6269 }
6270
6271 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6272 *pGCPhysMem = GCPhys;
6273 return VINF_SUCCESS;
6274}
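
/* Illustrative note: fAccessedDirty is X86_PTE_A for reads and
   X86_PTE_A | X86_PTE_D for writes, so a write to a page whose PTE already has
   A=1 but D=0 still takes the PGMGstModifyPage() path above to set the dirty
   bit at translation time. */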
6275
6276
6277
6278/**
6279 * Maps a physical page.
6280 *
6281 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6282 * @param pIemCpu The IEM per CPU data.
6283 * @param GCPhysMem The physical address.
6284 * @param fAccess The intended access.
6285 * @param ppvMem Where to return the mapping address.
6286 * @param pLock The PGM lock.
6287 */
6288static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6289{
6290#ifdef IEM_VERIFICATION_MODE_FULL
6291 /* Force the alternative path so we can ignore writes. */
6292 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6293 {
6294 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6295 {
6296 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6297 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6298 if (RT_FAILURE(rc2))
6299 pIemCpu->fProblematicMemory = true;
6300 }
6301 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6302 }
6303#endif
6304#ifdef IEM_LOG_MEMORY_WRITES
6305 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6306 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6307#endif
6308#ifdef IEM_VERIFICATION_MODE_MINIMAL
6309 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6310#endif
6311
6312 /** @todo This API may require some improving later. A private deal with PGM
6313 * regarding locking and unlocking needs to be struck. A couple of TLBs
6314 * living in PGM, but with publicly accessible inlined access methods
6315 * could perhaps be an even better solution. */
6316 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6317 GCPhysMem,
6318 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6319 pIemCpu->fBypassHandlers,
6320 ppvMem,
6321 pLock);
6322 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6323 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6324
6325#ifdef IEM_VERIFICATION_MODE_FULL
6326 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6327 pIemCpu->fProblematicMemory = true;
6328#endif
6329 return rc;
6330}
6331
6332
6333/**
6334 * Unmaps a page previously mapped by iemMemPageMap.
6335 *
6336 * @param pIemCpu The IEM per CPU data.
6337 * @param GCPhysMem The physical address.
6338 * @param fAccess The intended access.
6339 * @param pvMem What iemMemPageMap returned.
6340 * @param pLock The PGM lock.
6341 */
6342DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6343{
6344 NOREF(pIemCpu);
6345 NOREF(GCPhysMem);
6346 NOREF(fAccess);
6347 NOREF(pvMem);
6348 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6349}
6350
6351
6352/**
6353 * Looks up a memory mapping entry.
6354 *
6355 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6356 * @param pIemCpu The IEM per CPU data.
6357 * @param pvMem The memory address.
6358 * @param fAccess The access to match.
6359 */
6360DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6361{
6362 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6363 if ( pIemCpu->aMemMappings[0].pv == pvMem
6364 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6365 return 0;
6366 if ( pIemCpu->aMemMappings[1].pv == pvMem
6367 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6368 return 1;
6369 if ( pIemCpu->aMemMappings[2].pv == pvMem
6370 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6371 return 2;
6372 return VERR_NOT_FOUND;
6373}
6374
6375
6376/**
6377 * Finds a free memmap entry when using iNextMapping doesn't work.
6378 *
6379 * @returns Memory mapping index, 1024 on failure.
6380 * @param pIemCpu The IEM per CPU data.
6381 */
6382static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6383{
6384 /*
6385 * The easy case.
6386 */
6387 if (pIemCpu->cActiveMappings == 0)
6388 {
6389 pIemCpu->iNextMapping = 1;
6390 return 0;
6391 }
6392
6393 /* There should be enough mappings for all instructions. */
6394 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6395
6396 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6397 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6398 return i;
6399
6400 AssertFailedReturn(1024);
6401}
6402
6403
6404/**
6405 * Commits a bounce buffer that needs writing back and unmaps it.
6406 *
6407 * @returns Strict VBox status code.
6408 * @param pIemCpu The IEM per CPU data.
6409 * @param iMemMap The index of the buffer to commit.
6410 */
6411static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6412{
6413 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6414 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6415
6416 /*
6417 * Do the writing.
6418 */
6419 int rc;
6420#ifndef IEM_VERIFICATION_MODE_MINIMAL
6421 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6422 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6423 {
6424 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6425 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6426 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6427 if (!pIemCpu->fBypassHandlers)
6428 {
6429 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6430 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6431 pbBuf,
6432 cbFirst,
6433 PGMACCESSORIGIN_IEM);
6434 if (cbSecond && rc == VINF_SUCCESS)
6435 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6436 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6437 pbBuf + cbFirst,
6438 cbSecond,
6439 PGMACCESSORIGIN_IEM);
6440 }
6441 else
6442 {
6443 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6444 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6445 pbBuf,
6446 cbFirst);
6447 if (cbSecond && rc == VINF_SUCCESS)
6448 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6449 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6450 pbBuf + cbFirst,
6451 cbSecond);
6452 }
6453 if (rc != VINF_SUCCESS)
6454 {
6455 /** @todo status code handling */
6456 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6457 pIemCpu->fBypassHandlers ? "PGMPhysSimpleWriteGCPhys" : "PGMPhysWrite",
6458 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6459 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6460 }
6461 }
6462 else
6463#endif
6464 rc = VINF_SUCCESS;
6465
6466#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6467 /*
6468 * Record the write(s).
6469 */
6470 if (!pIemCpu->fNoRem)
6471 {
6472 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6473 if (pEvtRec)
6474 {
6475 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6476 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6477 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6478 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6479 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6480 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6481 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6482 }
6483 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6484 {
6485 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6486 if (pEvtRec)
6487 {
6488 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6489 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6490 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6491 memcpy(pEvtRec->u.RamWrite.ab,
6492 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6493 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6494 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6495 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6496 }
6497 }
6498 }
6499#endif
6500#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6501 if (rc == VINF_SUCCESS)
6502 {
6503 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6504 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6505 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6506 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6507 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6508 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6509
6510 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6511 g_cbIemWrote = cbWrote;
6512 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6513 }
6514#endif
6515
6516 /*
6517 * Free the mapping entry.
6518 */
6519 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6520 Assert(pIemCpu->cActiveMappings != 0);
6521 pIemCpu->cActiveMappings--;
6522 return rc;
6523}
6524
6525
6526/**
6527 * iemMemMap worker that deals with a request crossing pages.
6528 */
6529static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
6530 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6531{
6532 /*
6533 * Do the address translations.
6534 */
6535 RTGCPHYS GCPhysFirst;
6536 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6537 if (rcStrict != VINF_SUCCESS)
6538 return rcStrict;
6539
6540/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6541 * last byte. */
6542 RTGCPHYS GCPhysSecond;
6543 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6544 if (rcStrict != VINF_SUCCESS)
6545 return rcStrict;
6546 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6547
6548#ifdef IEM_VERIFICATION_MODE_FULL
6549 /*
6550 * Detect problematic memory when verifying so we can select
6551 * the right execution engine. (TLB: Redo this.)
6552 */
6553 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6554 {
6555 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysFirst,
6556 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6557 if (RT_SUCCESS(rc2))
6558 rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysSecond,
6559 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6560 if (RT_FAILURE(rc2))
6561 pIemCpu->fProblematicMemory = true;
6562 }
6563#endif
6564
6565
6566 /*
6567 * Read in the current memory content if it's a read, execute or partial
6568 * write access.
6569 */
6570 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6571 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6572 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
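 /* Worked example of the split above (illustrative): a 4 byte access whose page
    offset is 0xffe gives cbFirstPage = PAGE_SIZE - 0xffe = 2 bytes on the first
    page and cbSecondPage = cbMem - cbFirstPage = 2 bytes on the second page;
    GCPhysSecond was translated from the last byte, GCPtrFirst + (cbMem - 1). */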
6573
6574 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6575 {
6576 int rc;
6577 if (!pIemCpu->fBypassHandlers)
6578 {
6579 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6580 if (rc != VINF_SUCCESS)
6581 {
6582 /** @todo status code handling */
6583 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6584 return rc;
6585 }
6586 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6587 if (rc != VINF_SUCCESS)
6588 {
6589 /** @todo status code handling */
6590 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6591 return rc;
6592 }
6593 }
6594 else
6595 {
6596 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
6597 if (rc != VINF_SUCCESS)
6598 {
6599 /** @todo status code handling */
6600 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6601 return rc;
6602 }
6603 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6604 if (rc != VINF_SUCCESS)
6605 {
6606 /** @todo status code handling */
6607 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6608 return rc;
6609 }
6610 }
6611
6612#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6613 if ( !pIemCpu->fNoRem
6614 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6615 {
6616 /*
6617 * Record the reads.
6618 */
6619 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6620 if (pEvtRec)
6621 {
6622 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6623 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6624 pEvtRec->u.RamRead.cb = cbFirstPage;
6625 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6626 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6627 }
6628 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6629 if (pEvtRec)
6630 {
6631 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6632 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6633 pEvtRec->u.RamRead.cb = cbSecondPage;
6634 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6635 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6636 }
6637 }
6638#endif
6639 }
6640#ifdef VBOX_STRICT
6641 else
6642 memset(pbBuf, 0xcc, cbMem);
6643#endif
6644#ifdef VBOX_STRICT
6645 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6646 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6647#endif
6648
6649 /*
6650 * Commit the bounce buffer entry.
6651 */
6652 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6653 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6654 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6655 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6656 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6657 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6658 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6659 pIemCpu->iNextMapping = iMemMap + 1;
6660 pIemCpu->cActiveMappings++;
6661
6662 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6663 *ppvMem = pbBuf;
6664 return VINF_SUCCESS;
6665}
6666
6667
6668/**
6669 * iemMemMap worker that deals with iemMemPageMap failures.
6670 */
6671static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6672 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6673{
6674 /*
6675 * Filter out conditions we can't handle here and the ones which shouldn't happen.
6676 */
6677 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6678 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6679 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6680 {
6681 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
6682 return rcMap;
6683 }
6684 pIemCpu->cPotentialExits++;
6685
6686 /*
6687 * Read in the current memory content if it's a read, execute or partial
6688 * write access.
6689 */
6690 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6691 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6692 {
6693 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6694 memset(pbBuf, 0xff, cbMem);
6695 else
6696 {
6697 int rc;
6698 if (!pIemCpu->fBypassHandlers)
6699 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6700 else
6701 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6702 if (rc != VINF_SUCCESS)
6703 {
6704 /** @todo status code handling */
6705 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6706 pIemCpu->fBypassHandlers ? "PGMPhysSimpleReadGCPhys" : "PGMPhysRead", GCPhysFirst, rc));
6707 return rc;
6708 }
6709 }
6710
6711#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6712 if ( !pIemCpu->fNoRem
6713 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6714 {
6715 /*
6716 * Record the read.
6717 */
6718 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6719 if (pEvtRec)
6720 {
6721 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6722 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6723 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6724 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6725 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6726 }
6727 }
6728#endif
6729 }
6730#ifdef VBOX_STRICT
6731 else
6732 memset(pbBuf, 0xcc, cbMem);
6733#endif
6734#ifdef VBOX_STRICT
6735 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6736 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6737#endif
6738
6739 /*
6740 * Commit the bounce buffer entry.
6741 */
6742 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6743 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6744 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6745 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6746 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6747 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6748 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6749 pIemCpu->iNextMapping = iMemMap + 1;
6750 pIemCpu->cActiveMappings++;
6751
6752 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6753 *ppvMem = pbBuf;
6754 return VINF_SUCCESS;
6755}
6756
6757
6758
6759/**
6760 * Maps the specified guest memory for the given kind of access.
6761 *
6762 * This may be using bounce buffering of the memory if it's crossing a page
6763 * boundary or if there is an access handler installed for any of it. Because
6764 * of lock prefix guarantees, we're in for some extra clutter when this
6765 * happens.
6766 *
6767 * This may raise a \#GP, \#SS, \#PF or \#AC.
6768 *
6769 * @returns VBox strict status code.
6770 *
6771 * @param pIemCpu The IEM per CPU data.
6772 * @param ppvMem Where to return the pointer to the mapped
6773 * memory.
6774 * @param cbMem The number of bytes to map. This is usually 1,
6775 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6776 * string operations it can be up to a page.
6777 * @param iSegReg The index of the segment register to use for
6778 * this access. The base and limits are checked.
6779 * Use UINT8_MAX to indicate that no segmentation
6780 * is required (for IDT, GDT and LDT accesses).
6781 * @param GCPtrMem The address of the guest memory.
6782 * @param fAccess How the memory is being accessed. The
6783 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6784 * how to map the memory, while the
6785 * IEM_ACCESS_WHAT_XXX bit is used when raising
6786 * exceptions.
6787 */
6788static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6789{
6790 /*
6791 * Check the input and figure out which mapping entry to use.
6792 */
6793 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6794 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6795
6796 unsigned iMemMap = pIemCpu->iNextMapping;
6797 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6798 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6799 {
6800 iMemMap = iemMemMapFindFree(pIemCpu);
6801 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
6802 }
6803
6804 /*
6805 * Map the memory, checking that we can actually access it. If something
6806 * slightly complicated happens, fall back on bounce buffering.
6807 */
6808 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6809 if (rcStrict != VINF_SUCCESS)
6810 return rcStrict;
6811
6812 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6813 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6814
6815 RTGCPHYS GCPhysFirst;
6816 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6817 if (rcStrict != VINF_SUCCESS)
6818 return rcStrict;
6819
6820 void *pvMem;
6821 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6822 if (rcStrict != VINF_SUCCESS)
6823 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6824
6825 /*
6826 * Fill in the mapping table entry.
6827 */
6828 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
6829 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
6830 pIemCpu->iNextMapping = iMemMap + 1;
6831 pIemCpu->cActiveMappings++;
6832
6833 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6834 *ppvMem = pvMem;
6835 return VINF_SUCCESS;
6836}
6837
6838
6839/**
6840 * Commits the guest memory if bounce buffered and unmaps it.
6841 *
6842 * @returns Strict VBox status code.
6843 * @param pIemCpu The IEM per CPU data.
6844 * @param pvMem The mapping.
6845 * @param fAccess The kind of access.
6846 */
6847static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6848{
6849 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
6850 AssertReturn(iMemMap >= 0, iMemMap);
6851
6852 /* If it's bounce buffered, we may need to write back the buffer. */
6853 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6854 {
6855 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6856 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
6857 }
6858 /* Otherwise unlock it. */
6859 else
6860 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6861
6862 /* Free the entry. */
6863 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6864 Assert(pIemCpu->cActiveMappings != 0);
6865 pIemCpu->cActiveMappings--;
6866 return VINF_SUCCESS;
6867}
6868
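/*
 * Illustrative sketch (excluded from the build): the typical map / modify /
 * commit sequence an instruction implementation goes through when it needs
 * read-write access to guest memory via iemMemMap and iemMemCommitAndUnmap.
 * The DS-relative effective address and the IEM_ACCESS_DATA_RW constant are
 * assumptions here; the latter is expected to be defined alongside the
 * IEM_ACCESS_DATA_R/_W flags used above.
 */
#if 0
static VBOXSTRICTRC iemExampleRmwDataU32(PIEMCPU pIemCpu, RTGCPTR GCPtrEff)
{
    uint32_t *pu32;
    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32, sizeof(*pu32), X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32 += 1; /* Works on a direct mapping and a bounce buffer alike. */
        rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif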
6869
6870/**
6871 * Rolls back the memory mappings, releasing page locks and such.
6872 *
6873 * The caller shall only call this after checking cActiveMappings.
6874 *
6876 * @param pIemCpu The IEM per CPU data.
6877 */
6878static void iemMemRollback(PIEMCPU pIemCpu)
6879{
6880 Assert(pIemCpu->cActiveMappings > 0);
6881
6882 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
6883 while (iMemMap-- > 0)
6884 {
6885 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
6886 if (fAccess != IEM_ACCESS_INVALID)
6887 {
6888 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6889 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
6890 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6891 Assert(pIemCpu->cActiveMappings > 0);
6892 pIemCpu->cActiveMappings--;
6893 }
6894 }
6895}
6896
6897
6898/**
6899 * Fetches a data byte.
6900 *
6901 * @returns Strict VBox status code.
6902 * @param pIemCpu The IEM per CPU data.
6903 * @param pu8Dst Where to return the byte.
6904 * @param iSegReg The index of the segment register to use for
6905 * this access. The base and limits are checked.
6906 * @param GCPtrMem The address of the guest memory.
6907 */
6908static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6909{
6910 /* The lazy approach for now... */
6911 uint8_t const *pu8Src;
6912 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6913 if (rc == VINF_SUCCESS)
6914 {
6915 *pu8Dst = *pu8Src;
6916 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6917 }
6918 return rc;
6919}
6920
6921
6922/**
6923 * Fetches a data word.
6924 *
6925 * @returns Strict VBox status code.
6926 * @param pIemCpu The IEM per CPU data.
6927 * @param pu16Dst Where to return the word.
6928 * @param iSegReg The index of the segment register to use for
6929 * this access. The base and limits are checked.
6930 * @param GCPtrMem The address of the guest memory.
6931 */
6932static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6933{
6934 /* The lazy approach for now... */
6935 uint16_t const *pu16Src;
6936 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6937 if (rc == VINF_SUCCESS)
6938 {
6939 *pu16Dst = *pu16Src;
6940 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6941 }
6942 return rc;
6943}
6944
6945
6946/**
6947 * Fetches a data dword.
6948 *
6949 * @returns Strict VBox status code.
6950 * @param pIemCpu The IEM per CPU data.
6951 * @param pu32Dst Where to return the dword.
6952 * @param iSegReg The index of the segment register to use for
6953 * this access. The base and limits are checked.
6954 * @param GCPtrMem The address of the guest memory.
6955 */
6956static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6957{
6958 /* The lazy approach for now... */
6959 uint32_t const *pu32Src;
6960 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6961 if (rc == VINF_SUCCESS)
6962 {
6963 *pu32Dst = *pu32Src;
6964 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6965 }
6966 return rc;
6967}
6968
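/*
 * Illustrative sketch (excluded from the build): how an instruction
 * implementation might consume the fetch helpers above.  The DS-relative
 * effective address and the register update below are hypothetical; real
 * callers take both from the decoded instruction and only commit guest
 * state once the fetch has succeeded.
 */
#if 0
static VBOXSTRICTRC iemExampleLoadRaxFromDs(PIEMCPU pIemCpu, RTGCPTR GCPtrEff)
{
    uint32_t u32Value;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, &u32Value, X86_SREG_DS, GCPtrEff);
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->CTX_SUFF(pCtx)->rax = u32Value; /* 32-bit writes zero extend, as on real hardware. */
    return rcStrict;
}
#endif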
6969
6970#ifdef SOME_UNUSED_FUNCTION
6971/**
6972 * Fetches a data dword and sign extends it to a qword.
6973 *
6974 * @returns Strict VBox status code.
6975 * @param pIemCpu The IEM per CPU data.
6976 * @param pu64Dst Where to return the sign extended value.
6977 * @param iSegReg The index of the segment register to use for
6978 * this access. The base and limits are checked.
6979 * @param GCPtrMem The address of the guest memory.
6980 */
6981static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6982{
6983 /* The lazy approach for now... */
6984 int32_t const *pi32Src;
6985 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6986 if (rc == VINF_SUCCESS)
6987 {
6988 *pu64Dst = *pi32Src;
6989 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6990 }
6991#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6992 else
6993 *pu64Dst = 0;
6994#endif
6995 return rc;
6996}
6997#endif
6998
6999
7000/**
7001 * Fetches a data qword.
7002 *
7003 * @returns Strict VBox status code.
7004 * @param pIemCpu The IEM per CPU data.
7005 * @param pu64Dst Where to return the qword.
7006 * @param iSegReg The index of the segment register to use for
7007 * this access. The base and limits are checked.
7008 * @param GCPtrMem The address of the guest memory.
7009 */
7010static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7011{
7012 /* The lazy approach for now... */
7013 uint64_t const *pu64Src;
7014 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7015 if (rc == VINF_SUCCESS)
7016 {
7017 *pu64Dst = *pu64Src;
7018 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7019 }
7020 return rc;
7021}
7022
7023
7024/**
7025 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7026 *
7027 * @returns Strict VBox status code.
7028 * @param pIemCpu The IEM per CPU data.
7029 * @param pu64Dst Where to return the qword.
7030 * @param iSegReg The index of the segment register to use for
7031 * this access. The base and limits are checked.
7032 * @param GCPtrMem The address of the guest memory.
7033 */
7034static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7035{
7036 /* The lazy approach for now... */
7037 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7038 if (RT_UNLIKELY(GCPtrMem & 15))
7039 return iemRaiseGeneralProtectionFault0(pIemCpu);
7040
7041 uint64_t const *pu64Src;
7042 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7043 if (rc == VINF_SUCCESS)
7044 {
7045 *pu64Dst = *pu64Src;
7046 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7047 }
7048 return rc;
7049}
7050
7051
7052/**
7053 * Fetches a data tword.
7054 *
7055 * @returns Strict VBox status code.
7056 * @param pIemCpu The IEM per CPU data.
7057 * @param pr80Dst Where to return the tword.
7058 * @param iSegReg The index of the segment register to use for
7059 * this access. The base and limits are checked.
7060 * @param GCPtrMem The address of the guest memory.
7061 */
7062static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7063{
7064 /* The lazy approach for now... */
7065 PCRTFLOAT80U pr80Src;
7066 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7067 if (rc == VINF_SUCCESS)
7068 {
7069 *pr80Dst = *pr80Src;
7070 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7071 }
7072 return rc;
7073}
7074
7075
7076/**
7077 * Fetches a data dqword (double qword), generally SSE related.
7078 *
7079 * @returns Strict VBox status code.
7080 * @param pIemCpu The IEM per CPU data.
7081 * @param pu128Dst Where to return the dqword.
7082 * @param iSegReg The index of the segment register to use for
7083 * this access. The base and limits are checked.
7084 * @param GCPtrMem The address of the guest memory.
7085 */
7086static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7087{
7088 /* The lazy approach for now... */
7089 uint128_t const *pu128Src;
7090 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7091 if (rc == VINF_SUCCESS)
7092 {
7093 *pu128Dst = *pu128Src;
7094 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7095 }
7096 return rc;
7097}
7098
7099
7100/**
7101 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7102 * related.
7103 *
7104 * Raises \#GP(0) if not aligned.
7105 *
7106 * @returns Strict VBox status code.
7107 * @param pIemCpu The IEM per CPU data.
7108 * @param pu128Dst Where to return the dqword.
7109 * @param iSegReg The index of the segment register to use for
7110 * this access. The base and limits are checked.
7111 * @param GCPtrMem The address of the guest memory.
7112 */
7113static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7114{
7115 /* The lazy approach for now... */
7116 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7117 if ( (GCPtrMem & 15)
7118 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7119 return iemRaiseGeneralProtectionFault0(pIemCpu);
7120
7121 uint128_t const *pu128Src;
7122 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7123 if (rc == VINF_SUCCESS)
7124 {
7125 *pu128Dst = *pu128Src;
7126 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7127 }
7128 return rc;
7129}
7130
7131
7132
7133
7134/**
7135 * Fetches a descriptor register (lgdt, lidt).
7136 *
7137 * @returns Strict VBox status code.
7138 * @param pIemCpu The IEM per CPU data.
7139 * @param pcbLimit Where to return the limit.
7140 * @param pGCPtrBase Where to return the base.
7141 * @param iSegReg The index of the segment register to use for
7142 * this access. The base and limits are checked.
7143 * @param GCPtrMem The address of the guest memory.
7144 * @param enmOpSize The effective operand size.
7145 */
7146static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
7147 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7148{
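    /* The memory operand is a 16-bit limit followed by the base address:
       3 base bytes for a 16-bit operand size (bits 31:24 of the base become
       zero), 4 bytes for 32-bit and 8 for 64-bit, hence the 2+3, 2+4 and
       2+8 mapping sizes below. */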
7149 uint8_t const *pu8Src;
7150 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7151 (void **)&pu8Src,
7152 enmOpSize == IEMMODE_64BIT
7153 ? 2 + 8
7154 : enmOpSize == IEMMODE_32BIT
7155 ? 2 + 4
7156 : 2 + 3,
7157 iSegReg,
7158 GCPtrMem,
7159 IEM_ACCESS_DATA_R);
7160 if (rcStrict == VINF_SUCCESS)
7161 {
7162 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7163 switch (enmOpSize)
7164 {
7165 case IEMMODE_16BIT:
7166 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7167 break;
7168 case IEMMODE_32BIT:
7169 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7170 break;
7171 case IEMMODE_64BIT:
7172 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7173 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7174 break;
7175
7176 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7177 }
7178 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7179 }
7180 return rcStrict;
7181}
7182
7183
7184
7185/**
7186 * Stores a data byte.
7187 *
7188 * @returns Strict VBox status code.
7189 * @param pIemCpu The IEM per CPU data.
7190 * @param iSegReg The index of the segment register to use for
7191 * this access. The base and limits are checked.
7192 * @param GCPtrMem The address of the guest memory.
7193 * @param u8Value The value to store.
7194 */
7195static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7196{
7197 /* The lazy approach for now... */
7198 uint8_t *pu8Dst;
7199 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7200 if (rc == VINF_SUCCESS)
7201 {
7202 *pu8Dst = u8Value;
7203 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7204 }
7205 return rc;
7206}
7207
7208
7209/**
7210 * Stores a data word.
7211 *
7212 * @returns Strict VBox status code.
7213 * @param pIemCpu The IEM per CPU data.
7214 * @param iSegReg The index of the segment register to use for
7215 * this access. The base and limits are checked.
7216 * @param GCPtrMem The address of the guest memory.
7217 * @param u16Value The value to store.
7218 */
7219static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7220{
7221 /* The lazy approach for now... */
7222 uint16_t *pu16Dst;
7223 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7224 if (rc == VINF_SUCCESS)
7225 {
7226 *pu16Dst = u16Value;
7227 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7228 }
7229 return rc;
7230}
7231
7232
7233/**
7234 * Stores a data dword.
7235 *
7236 * @returns Strict VBox status code.
7237 * @param pIemCpu The IEM per CPU data.
7238 * @param iSegReg The index of the segment register to use for
7239 * this access. The base and limits are checked.
7240 * @param GCPtrMem The address of the guest memory.
7241 * @param u32Value The value to store.
7242 */
7243static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7244{
7245 /* The lazy approach for now... */
7246 uint32_t *pu32Dst;
7247 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7248 if (rc == VINF_SUCCESS)
7249 {
7250 *pu32Dst = u32Value;
7251 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7252 }
7253 return rc;
7254}
7255
7256
7257/**
7258 * Stores a data qword.
7259 *
7260 * @returns Strict VBox status code.
7261 * @param pIemCpu The IEM per CPU data.
7262 * @param iSegReg The index of the segment register to use for
7263 * this access. The base and limits are checked.
7264 * @param GCPtrMem The address of the guest memory.
7265 * @param u64Value The value to store.
7266 */
7267static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7268{
7269 /* The lazy approach for now... */
7270 uint64_t *pu64Dst;
7271 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7272 if (rc == VINF_SUCCESS)
7273 {
7274 *pu64Dst = u64Value;
7275 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7276 }
7277 return rc;
7278}
7279
7280
7281/**
7282 * Stores a data dqword.
7283 *
7284 * @returns Strict VBox status code.
7285 * @param pIemCpu The IEM per CPU data.
7286 * @param iSegReg The index of the segment register to use for
7287 * this access. The base and limits are checked.
7288 * @param GCPtrMem The address of the guest memory.
7289 * @param u128Value The value to store.
7290 */
7291static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7292{
7293 /* The lazy approach for now... */
7294 uint128_t *pu128Dst;
7295 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7296 if (rc == VINF_SUCCESS)
7297 {
7298 *pu128Dst = u128Value;
7299 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7300 }
7301 return rc;
7302}
7303
7304
7305/**
7306 * Stores a data dqword, SSE aligned.
7307 *
7308 * @returns Strict VBox status code.
7309 * @param pIemCpu The IEM per CPU data.
7310 * @param iSegReg The index of the segment register to use for
7311 * this access. The base and limits are checked.
7312 * @param GCPtrMem The address of the guest memory.
7313 * @param u128Value The value to store.
7314 */
7315static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7316{
7317 /* The lazy approach for now... */
7318 if ( (GCPtrMem & 15)
7319 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7320 return iemRaiseGeneralProtectionFault0(pIemCpu);
7321
7322 uint128_t *pu128Dst;
7323 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7324 if (rc == VINF_SUCCESS)
7325 {
7326 *pu128Dst = u128Value;
7327 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7328 }
7329 return rc;
7330}
7331
7332
7333/**
7334 * Stores a descriptor register (sgdt, sidt).
7335 *
7336 * @returns Strict VBox status code.
7337 * @param pIemCpu The IEM per CPU data.
7338 * @param cbLimit The limit.
7339 * @param GCPtrBase The base address.
7340 * @param iSegReg The index of the segment register to use for
7341 * this access. The base and limits are checked.
7342 * @param GCPtrMem The address of the guest memory.
7343 * @param enmOpSize The effective operand size.
7344 */
7345static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
7346 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7347{
7348 uint8_t *pu8Src;
7349 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7350 (void **)&pu8Src,
7351 enmOpSize == IEMMODE_64BIT
7352 ? 2 + 8
7353 : enmOpSize == IEMMODE_32BIT
7354 ? 2 + 4
7355 : 2 + 3,
7356 iSegReg,
7357 GCPtrMem,
7358 IEM_ACCESS_DATA_W);
7359 if (rcStrict == VINF_SUCCESS)
7360 {
7361 pu8Src[0] = RT_BYTE1(cbLimit);
7362 pu8Src[1] = RT_BYTE2(cbLimit);
7363 pu8Src[2] = RT_BYTE1(GCPtrBase);
7364 pu8Src[3] = RT_BYTE2(GCPtrBase);
7365 pu8Src[4] = RT_BYTE3(GCPtrBase);
7366 if (enmOpSize == IEMMODE_16BIT)
7367 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7368 else
7369 {
7370 pu8Src[5] = RT_BYTE4(GCPtrBase);
7371 if (enmOpSize == IEMMODE_64BIT)
7372 {
7373 pu8Src[6] = RT_BYTE5(GCPtrBase);
7374 pu8Src[7] = RT_BYTE6(GCPtrBase);
7375 pu8Src[8] = RT_BYTE7(GCPtrBase);
7376 pu8Src[9] = RT_BYTE8(GCPtrBase);
7377 }
7378 }
7379 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7380 }
7381 return rcStrict;
7382}
7383
7384
7385/**
7386 * Pushes a word onto the stack.
7387 *
7388 * @returns Strict VBox status code.
7389 * @param pIemCpu The IEM per CPU data.
7390 * @param u16Value The value to push.
7391 */
7392static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7393{
7394 /* Decrement the stack pointer. */
7395 uint64_t uNewRsp;
7396 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7397 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7398
7399 /* Write the word the lazy way. */
7400 uint16_t *pu16Dst;
7401 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7402 if (rc == VINF_SUCCESS)
7403 {
7404 *pu16Dst = u16Value;
7405 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7406 }
7407
7408 /* Commit the new RSP value unless an access handler made trouble. */
7409 if (rc == VINF_SUCCESS)
7410 pCtx->rsp = uNewRsp;
7411
7412 return rc;
7413}
7414
7415
7416/**
7417 * Pushes a dword onto the stack.
7418 *
7419 * @returns Strict VBox status code.
7420 * @param pIemCpu The IEM per CPU data.
7421 * @param u32Value The value to push.
7422 */
7423static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7424{
7425 /* Decrement the stack pointer. */
7426 uint64_t uNewRsp;
7427 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7428 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7429
7430 /* Write the dword the lazy way. */
7431 uint32_t *pu32Dst;
7432 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7433 if (rc == VINF_SUCCESS)
7434 {
7435 *pu32Dst = u32Value;
7436 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7437 }
7438
7439 /* Commit the new RSP value unless an access handler made trouble. */
7440 if (rc == VINF_SUCCESS)
7441 pCtx->rsp = uNewRsp;
7442
7443 return rc;
7444}
7445
7446
7447/**
7448 * Pushes a dword segment register value onto the stack.
7449 *
7450 * @returns Strict VBox status code.
7451 * @param pIemCpu The IEM per CPU data.
7452 * @param u32Value The value to push.
7453 */
7454static VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7455{
7456 /* Decrement the stack pointer. */
7457 uint64_t uNewRsp;
7458 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7459 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7460
7461 VBOXSTRICTRC rc;
7462 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7463 {
7464 /* The recompiler writes a full dword. */
7465 uint32_t *pu32Dst;
7466 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7467 if (rc == VINF_SUCCESS)
7468 {
7469 *pu32Dst = u32Value;
7470 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7471 }
7472 }
7473 else
7474 {
7475 /* The Intel docs talk about zero extending the selector register
7476 value. My actual Intel CPU here might be zero extending the value,
7477 but it still only writes the lower word... */
7478 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7479 * happens when crossing an electric page boundary, is the high word
7480 * checked for write accessibility or not? Probably it is. What about
7481 * segment limits? */
7482 uint16_t *pu16Dst;
7483 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7484 if (rc == VINF_SUCCESS)
7485 {
7486 *pu16Dst = (uint16_t)u32Value;
7487 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7488 }
7489 }
7490
7491 /* Commit the new RSP value unless an access handler made trouble. */
7492 if (rc == VINF_SUCCESS)
7493 pCtx->rsp = uNewRsp;
7494
7495 return rc;
7496}
7497
7498
7499/**
7500 * Pushes a qword onto the stack.
7501 *
7502 * @returns Strict VBox status code.
7503 * @param pIemCpu The IEM per CPU data.
7504 * @param u64Value The value to push.
7505 */
7506static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7507{
7508 /* Decrement the stack pointer. */
7509 uint64_t uNewRsp;
7510 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7511 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7512
7513 /* Write the qword the lazy way. */
7514 uint64_t *pu64Dst;
7515 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7516 if (rc == VINF_SUCCESS)
7517 {
7518 *pu64Dst = u64Value;
7519 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7520 }
7521
7522 /* Commit the new RSP value unless an access handler made trouble. */
7523 if (rc == VINF_SUCCESS)
7524 pCtx->rsp = uNewRsp;
7525
7526 return rc;
7527}
7528
7529
7530/**
7531 * Pops a word from the stack.
7532 *
7533 * @returns Strict VBox status code.
7534 * @param pIemCpu The IEM per CPU data.
7535 * @param pu16Value Where to store the popped value.
7536 */
7537static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7538{
7539 /* Increment the stack pointer. */
7540 uint64_t uNewRsp;
7541 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7542 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7543
7544 /* Read the word the lazy way. */
7545 uint16_t const *pu16Src;
7546 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7547 if (rc == VINF_SUCCESS)
7548 {
7549 *pu16Value = *pu16Src;
7550 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7551
7552 /* Commit the new RSP value. */
7553 if (rc == VINF_SUCCESS)
7554 pCtx->rsp = uNewRsp;
7555 }
7556
7557 return rc;
7558}
7559
7560
7561/**
7562 * Pops a dword from the stack.
7563 *
7564 * @returns Strict VBox status code.
7565 * @param pIemCpu The IEM per CPU data.
7566 * @param pu32Value Where to store the popped value.
7567 */
7568static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7569{
7570 /* Increment the stack pointer. */
7571 uint64_t uNewRsp;
7572 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7573 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7574
7575 /* Read the dword the lazy way. */
7576 uint32_t const *pu32Src;
7577 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7578 if (rc == VINF_SUCCESS)
7579 {
7580 *pu32Value = *pu32Src;
7581 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7582
7583 /* Commit the new RSP value. */
7584 if (rc == VINF_SUCCESS)
7585 pCtx->rsp = uNewRsp;
7586 }
7587
7588 return rc;
7589}
7590
7591
7592/**
7593 * Pops a qword from the stack.
7594 *
7595 * @returns Strict VBox status code.
7596 * @param pIemCpu The IEM per CPU data.
7597 * @param pu64Value Where to store the popped value.
7598 */
7599static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7600{
7601 /* Increment the stack pointer. */
7602 uint64_t uNewRsp;
7603 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7604 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7605
7606 /* Read the qword the lazy way. */
7607 uint64_t const *pu64Src;
7608 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7609 if (rc == VINF_SUCCESS)
7610 {
7611 *pu64Value = *pu64Src;
7612 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7613
7614 /* Commit the new RSP value. */
7615 if (rc == VINF_SUCCESS)
7616 pCtx->rsp = uNewRsp;
7617 }
7618
7619 return rc;
7620}
7621
7622
7623/**
7624 * Pushes a word onto the stack, using a temporary stack pointer.
7625 *
7626 * @returns Strict VBox status code.
7627 * @param pIemCpu The IEM per CPU data.
7628 * @param u16Value The value to push.
7629 * @param pTmpRsp Pointer to the temporary stack pointer.
7630 */
7631static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7632{
7633 /* Decrement the stack pointer. */
7634 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7635 RTUINT64U NewRsp = *pTmpRsp;
7636 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7637
7638 /* Write the word the lazy way. */
7639 uint16_t *pu16Dst;
7640 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7641 if (rc == VINF_SUCCESS)
7642 {
7643 *pu16Dst = u16Value;
7644 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7645 }
7646
7647 /* Commit the new RSP value unless an access handler made trouble. */
7648 if (rc == VINF_SUCCESS)
7649 *pTmpRsp = NewRsp;
7650
7651 return rc;
7652}
7653
7654
7655/**
7656 * Pushes a dword onto the stack, using a temporary stack pointer.
7657 *
7658 * @returns Strict VBox status code.
7659 * @param pIemCpu The IEM per CPU data.
7660 * @param u32Value The value to push.
7661 * @param pTmpRsp Pointer to the temporary stack pointer.
7662 */
7663static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7664{
7665 /* Decrement the stack pointer. */
7666 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7667 RTUINT64U NewRsp = *pTmpRsp;
7668 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7669
7670 /* Write the dword the lazy way. */
7671 uint32_t *pu32Dst;
7672 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7673 if (rc == VINF_SUCCESS)
7674 {
7675 *pu32Dst = u32Value;
7676 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7677 }
7678
7679 /* Commit the new RSP value unless an access handler made trouble. */
7680 if (rc == VINF_SUCCESS)
7681 *pTmpRsp = NewRsp;
7682
7683 return rc;
7684}
7685
7686
7687/**
7688 * Pushes a qword onto the stack, using a temporary stack pointer.
7689 *
7690 * @returns Strict VBox status code.
7691 * @param pIemCpu The IEM per CPU data.
7692 * @param u64Value The value to push.
7693 * @param pTmpRsp Pointer to the temporary stack pointer.
7694 */
7695static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7696{
7697 /* Decrement the stack pointer. */
7698 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7699 RTUINT64U NewRsp = *pTmpRsp;
7700 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7701
7702 /* Write the qword the lazy way. */
7703 uint64_t *pu64Dst;
7704 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7705 if (rc == VINF_SUCCESS)
7706 {
7707 *pu64Dst = u64Value;
7708 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7709 }
7710
7711 /* Commit the new RSP value unless an access handler made trouble. */
7712 if (rc == VINF_SUCCESS)
7713 *pTmpRsp = NewRsp;
7714
7715 return rc;
7716}
7717
7718
7719/**
7720 * Pops a word from the stack, using a temporary stack pointer.
7721 *
7722 * @returns Strict VBox status code.
7723 * @param pIemCpu The IEM per CPU data.
7724 * @param pu16Value Where to store the popped value.
7725 * @param pTmpRsp Pointer to the temporary stack pointer.
7726 */
7727static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7728{
7729 /* Increment the stack pointer. */
7730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7731 RTUINT64U NewRsp = *pTmpRsp;
7732 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7733
7734 /* Read the word the lazy way. */
7735 uint16_t const *pu16Src;
7736 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7737 if (rc == VINF_SUCCESS)
7738 {
7739 *pu16Value = *pu16Src;
7740 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7741
7742 /* Commit the new RSP value. */
7743 if (rc == VINF_SUCCESS)
7744 *pTmpRsp = NewRsp;
7745 }
7746
7747 return rc;
7748}
7749
7750
7751/**
7752 * Pops a dword from the stack, using a temporary stack pointer.
7753 *
7754 * @returns Strict VBox status code.
7755 * @param pIemCpu The IEM per CPU data.
7756 * @param pu32Value Where to store the popped value.
7757 * @param pTmpRsp Pointer to the temporary stack pointer.
7758 */
7759static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7760{
7761 /* Increment the stack pointer. */
7762 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7763 RTUINT64U NewRsp = *pTmpRsp;
7764 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7765
7766 /* Read the dword the lazy way. */
7767 uint32_t const *pu32Src;
7768 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7769 if (rc == VINF_SUCCESS)
7770 {
7771 *pu32Value = *pu32Src;
7772 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7773
7774 /* Commit the new RSP value. */
7775 if (rc == VINF_SUCCESS)
7776 *pTmpRsp = NewRsp;
7777 }
7778
7779 return rc;
7780}
7781
7782
7783/**
7784 * Pops a qword from the stack, using a temporary stack pointer.
7785 *
7786 * @returns Strict VBox status code.
7787 * @param pIemCpu The IEM per CPU data.
7788 * @param pu64Value Where to store the popped value.
7789 * @param pTmpRsp Pointer to the temporary stack pointer.
7790 */
7791static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7792{
7793 /* Increment the stack pointer. */
7794 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7795 RTUINT64U NewRsp = *pTmpRsp;
7796 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7797
7798 /* Read the qword the lazy way. */
7799 uint64_t const *pu64Src;
7800 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7801 if (rcStrict == VINF_SUCCESS)
7802 {
7803 *pu64Value = *pu64Src;
7804 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7805
7806 /* Commit the new RSP value. */
7807 if (rcStrict == VINF_SUCCESS)
7808 *pTmpRsp = NewRsp;
7809 }
7810
7811 return rcStrict;
7812}
7813
7814
7815/**
7816 * Begin a special stack push (used by interrupts, exceptions and such).
7817 *
7818 * This will raise \#SS or \#PF if appropriate.
7819 *
7820 * @returns Strict VBox status code.
7821 * @param pIemCpu The IEM per CPU data.
7822 * @param cbMem The number of bytes to push onto the stack.
7823 * @param ppvMem Where to return the pointer to the stack memory.
7824 * As with the other memory functions this could be
7825 * direct access or bounce buffered access, so
7826 * don't commit any register until the commit call
7827 * succeeds.
7828 * @param puNewRsp Where to return the new RSP value. This must be
7829 * passed unchanged to
7830 * iemMemStackPushCommitSpecial().
7831 */
7832static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
7833{
7834 Assert(cbMem < UINT8_MAX);
7835 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7836 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7837 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7838}
7839
7840
7841/**
7842 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7843 *
7844 * This will update the rSP.
7845 *
7846 * @returns Strict VBox status code.
7847 * @param pIemCpu The IEM per CPU data.
7848 * @param pvMem The pointer returned by
7849 * iemMemStackPushBeginSpecial().
7850 * @param uNewRsp The new RSP value returned by
7851 * iemMemStackPushBeginSpecial().
7852 */
7853static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
7854{
7855 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
7856 if (rcStrict == VINF_SUCCESS)
7857 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7858 return rcStrict;
7859}
7860
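/*
 * Illustrative sketch (excluded from the build) of the begin/commit protocol
 * above: map the whole frame, fill it in, then commit the memory and the new
 * RSP in one go.  The three word real-mode style frame shown here is a
 * hypothetical simplification of what the exception dispatching code does.
 */
#if 0
static VBOXSTRICTRC iemExamplePushThreeWordFrame(PIEMCPU pIemCpu, uint16_t uFlags, uint16_t uSelCs, uint16_t uIp)
{
    uint16_t    *pau16Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau16Frame[2] = uFlags;  /* highest address */
    pau16Frame[1] = uSelCs;
    pau16Frame[0] = uIp;     /* lowest address, the new stack top */
    return iemMemStackPushCommitSpecial(pIemCpu, pau16Frame, uNewRsp);
}
#endif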
7861
7862/**
7863 * Begin a special stack pop (used by iret, retf and such).
7864 *
7865 * This will raise \#SS or \#PF if appropriate.
7866 *
7867 * @returns Strict VBox status code.
7868 * @param pIemCpu The IEM per CPU data.
7869 * @param cbMem The number of bytes to pop from the stack.
7870 * @param ppvMem Where to return the pointer to the stack memory.
7871 * @param puNewRsp Where to return the new RSP value. This must be
7872 * passed unchanged to
7873 * iemMemStackPopCommitSpecial() or applied
7874 * manually if iemMemStackPopDoneSpecial() is used.
7875 */
7876static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7877{
7878 Assert(cbMem < UINT8_MAX);
7879 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7880 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7881 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7882}
7883
7884
7885/**
7886 * Continue a special stack pop (used by iret and retf).
7887 *
7888 * This will raise \#SS or \#PF if appropriate.
7889 *
7890 * @returns Strict VBox status code.
7891 * @param pIemCpu The IEM per CPU data.
7892 * @param cbMem The number of bytes to pop from the stack.
7893 * @param ppvMem Where to return the pointer to the stack memory.
7894 * @param puNewRsp Where to return the new RSP value. This must be
7895 * passed unchanged to
7896 * iemMemStackPopCommitSpecial() or applied
7897 * manually if iemMemStackPopDoneSpecial() is used.
7898 */
7899static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7900{
7901 Assert(cbMem < UINT8_MAX);
7902 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7903 RTUINT64U NewRsp;
7904 NewRsp.u = *puNewRsp;
7905 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7906 *puNewRsp = NewRsp.u;
7907 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7908}
7909
7910
7911/**
7912 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
7913 *
7914 * This will update the rSP.
7915 *
7916 * @returns Strict VBox status code.
7917 * @param pIemCpu The IEM per CPU data.
7918 * @param pvMem The pointer returned by
7919 * iemMemStackPopBeginSpecial().
7920 * @param uNewRsp The new RSP value returned by
7921 * iemMemStackPopBeginSpecial().
7922 */
7923static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
7924{
7925 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7926 if (rcStrict == VINF_SUCCESS)
7927 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7928 return rcStrict;
7929}
7930
7931
7932/**
7933 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7934 * iemMemStackPopContinueSpecial).
7935 *
7936 * The caller will manually commit the rSP.
7937 *
7938 * @returns Strict VBox status code.
7939 * @param pIemCpu The IEM per CPU data.
7940 * @param pvMem The pointer returned by
7941 * iemMemStackPopBeginSpecial() or
7942 * iemMemStackPopContinueSpecial().
7943 */
7944static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
7945{
7946 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7947}
7948
7949
7950/**
7951 * Fetches a system table byte.
7952 *
7953 * @returns Strict VBox status code.
7954 * @param pIemCpu The IEM per CPU data.
7955 * @param pbDst Where to return the byte.
7956 * @param iSegReg The index of the segment register to use for
7957 * this access. The base and limits are checked.
7958 * @param GCPtrMem The address of the guest memory.
7959 */
7960static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7961{
7962 /* The lazy approach for now... */
7963 uint8_t const *pbSrc;
7964 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
7965 if (rc == VINF_SUCCESS)
7966 {
7967 *pbDst = *pbSrc;
7968 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
7969 }
7970 return rc;
7971}
7972
7973
7974/**
7975 * Fetches a system table word.
7976 *
7977 * @returns Strict VBox status code.
7978 * @param pIemCpu The IEM per CPU data.
7979 * @param pu16Dst Where to return the word.
7980 * @param iSegReg The index of the segment register to use for
7981 * this access. The base and limits are checked.
7982 * @param GCPtrMem The address of the guest memory.
7983 */
7984static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7985{
7986 /* The lazy approach for now... */
7987 uint16_t const *pu16Src;
7988 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
7989 if (rc == VINF_SUCCESS)
7990 {
7991 *pu16Dst = *pu16Src;
7992 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
7993 }
7994 return rc;
7995}
7996
7997
7998/**
7999 * Fetches a system table dword.
8000 *
8001 * @returns Strict VBox status code.
8002 * @param pIemCpu The IEM per CPU data.
8003 * @param pu32Dst Where to return the dword.
8004 * @param iSegReg The index of the segment register to use for
8005 * this access. The base and limits are checked.
8006 * @param GCPtrMem The address of the guest memory.
8007 */
8008static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8009{
8010 /* The lazy approach for now... */
8011 uint32_t const *pu32Src;
8012 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8013 if (rc == VINF_SUCCESS)
8014 {
8015 *pu32Dst = *pu32Src;
8016 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8017 }
8018 return rc;
8019}
8020
8021
8022/**
8023 * Fetches a system table qword.
8024 *
8025 * @returns Strict VBox status code.
8026 * @param pIemCpu The IEM per CPU data.
8027 * @param pu64Dst Where to return the qword.
8028 * @param iSegReg The index of the segment register to use for
8029 * this access. The base and limits are checked.
8030 * @param GCPtrMem The address of the guest memory.
8031 */
8032static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8033{
8034 /* The lazy approach for now... */
8035 uint64_t const *pu64Src;
8036 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8037 if (rc == VINF_SUCCESS)
8038 {
8039 *pu64Dst = *pu64Src;
8040 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8041 }
8042 return rc;
8043}
8044
8045
8046/**
8047 * Fetches a descriptor table entry with caller specified error code.
8048 *
8049 * @returns Strict VBox status code.
8050 * @param pIemCpu The IEM per CPU.
8051 * @param pDesc Where to return the descriptor table entry.
8052 * @param uSel The selector which table entry to fetch.
8053 * @param uXcpt The exception to raise on table lookup error.
8054 * @param uErrorCode The error code associated with the exception.
8055 */
8056static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt,
8057 uint16_t uErrorCode)
8058{
8059 AssertPtr(pDesc);
8060 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8061
8062 /** @todo did the 286 require all 8 bytes to be accessible? */
8063 /*
8064 * Get the selector table base and check bounds.
8065 */
8066 RTGCPTR GCPtrBase;
8067 if (uSel & X86_SEL_LDT)
8068 {
8069 if ( !pCtx->ldtr.Attr.n.u1Present
8070 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8071 {
8072 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8073 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8074 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8075 uErrorCode, 0);
8076 }
8077
8078 Assert(pCtx->ldtr.Attr.n.u1Present);
8079 GCPtrBase = pCtx->ldtr.u64Base;
8080 }
8081 else
8082 {
8083 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8084 {
8085 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8086 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8087 uErrorCode, 0);
8088 }
8089 GCPtrBase = pCtx->gdtr.pGdt;
8090 }
8091
8092 /*
8093 * Read the legacy descriptor and maybe the long mode extensions if
8094 * required.
8095 */
8096 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8097 if (rcStrict == VINF_SUCCESS)
8098 {
8099 if ( !IEM_IS_LONG_MODE(pIemCpu)
8100 || pDesc->Legacy.Gen.u1DescType)
8101 pDesc->Long.au64[1] = 0;
8102 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8103 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8104 else
8105 {
8106 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8107 /** @todo is this the right exception? */
8108 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8109 }
8110 }
8111 return rcStrict;
8112}
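
/*
 * Illustrative example (not part of the build): for selector 0x2B the table
 * indicator bit (X86_SEL_LDT) and an RPL of 3 are set, so the entry is read
 * from ldtr.u64Base + (0x2B & X86_SEL_MASK) = ldtr.u64Base + 0x28; selector
 * 0x10 instead uses the GDT and the entry sits at gdtr.pGdt + 0x10.  The
 * bounds checks above OR in X86_SEL_RPL_LDT (7) so that the offset of the
 * last byte of the 8-byte entry is what gets compared against the limit.
 */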
8113
8114
8115/**
8116 * Fetches a descriptor table entry.
8117 *
8118 * @returns Strict VBox status code.
8119 * @param pIemCpu The IEM per CPU.
8120 * @param pDesc Where to return the descriptor table entry.
8121 * @param uSel The selector which table entry to fetch.
8122 * @param uXcpt The exception to raise on table lookup error.
8123 */
8124static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8125{
8126 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8127}
8128
8129
8130/**
8131 * Fakes a long mode stack selector for SS = 0.
8132 *
8133 * @param pDescSs Where to return the fake stack descriptor.
8134 * @param uDpl The DPL we want.
8135 */
8136static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8137{
8138 pDescSs->Long.au64[0] = 0;
8139 pDescSs->Long.au64[1] = 0;
8140 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8141 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8142 pDescSs->Long.Gen.u2Dpl = uDpl;
8143 pDescSs->Long.Gen.u1Present = 1;
8144 pDescSs->Long.Gen.u1Long = 1;
8145}
8146
8147
8148/**
8149 * Marks the selector descriptor as accessed (only non-system descriptors).
8150 *
8151 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8152 * will therefore skip the limit checks.
8153 *
8154 * @returns Strict VBox status code.
8155 * @param pIemCpu The IEM per CPU.
8156 * @param uSel The selector.
8157 */
8158static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8159{
8160 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8161
8162 /*
8163 * Get the selector table base and calculate the entry address.
8164 */
8165 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8166 ? pCtx->ldtr.u64Base
8167 : pCtx->gdtr.pGdt;
8168 GCPtr += uSel & X86_SEL_MASK;
8169
8170 /*
8171 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8172 * ugly stuff to avoid this. This will make sure it's an atomic access
8173 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8174 */
8175 VBOXSTRICTRC rcStrict;
8176 uint32_t volatile *pu32;
8177 if ((GCPtr & 3) == 0)
8178 {
8179 /* The normal case, map the 32 bits around the accessed bit (40). */
8180 GCPtr += 2 + 2;
8181 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8182 if (rcStrict != VINF_SUCCESS)
8183 return rcStrict;
8184 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8185 }
8186 else
8187 {
8188 /* The misaligned GDT/LDT case, map the whole thing. */
8189 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8190 if (rcStrict != VINF_SUCCESS)
8191 return rcStrict;
8192 switch ((uintptr_t)pu32 & 3)
8193 {
8194 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8195 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8196 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8197 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8198 }
8199 }
8200
8201 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8202}
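
/*
 * Worked example for the misaligned case above (illustrative only): the
 * accessed flag is bit 40 of the 8-byte descriptor, i.e. bit 0 of byte 5.
 * If the mapping comes back with (uintptr_t)pu32 & 3 == 2, then pu32 + 2 is
 * the nearest dword aligned byte address, byte 5 lies 16 bits further in,
 * and the bit to set becomes 40 - 16 = 24.
 */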
8203
8204/** @} */
8205
8206
8207/*
8208 * Include the C/C++ implementation of instruction.
8209 */
8210#include "IEMAllCImpl.cpp.h"
8211
8212
8213
8214/** @name "Microcode" macros.
8215 *
8216 * The idea is that we should be able to use the same code to interpret
8217 * instructions as well as recompiler instructions. Thus this obfuscation.
8218 *
8219 * @{
8220 */
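
/*
 * A minimal usage sketch (illustrative only, not compiled): the opcode
 * decoder strings these macros together into small "microcode" blocks,
 * e.g. the 16-bit "push general register" pattern in
 * IEMAllInstructions.cpp.h boils down to something like this, with iReg
 * being the register index decoded from the opcode byte:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 *
 * Because everything is hidden behind macros, the same block can later be
 * given a second expansion for a recompiler.
 */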
8221#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8222#define IEM_MC_END() }
8223#define IEM_MC_PAUSE() do {} while (0)
8224#define IEM_MC_CONTINUE() do {} while (0)
8225
8226/** Internal macro. */
8227#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8228 do \
8229 { \
8230 VBOXSTRICTRC rcStrict2 = a_Expr; \
8231 if (rcStrict2 != VINF_SUCCESS) \
8232 return rcStrict2; \
8233 } while (0)
8234
8235#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8236#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8237#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8238#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8239#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8240#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8241#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8242
8243#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8244#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8245 do { \
8246 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8247 return iemRaiseDeviceNotAvailable(pIemCpu); \
8248 } while (0)
8249#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8250 do { \
8251 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8252 return iemRaiseMathFault(pIemCpu); \
8253 } while (0)
8254#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8255 do { \
8256 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8257 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8258 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8259 return iemRaiseUndefinedOpcode(pIemCpu); \
8260 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8261 return iemRaiseDeviceNotAvailable(pIemCpu); \
8262 } while (0)
8263#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8264 do { \
8265 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8266 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8267 return iemRaiseUndefinedOpcode(pIemCpu); \
8268 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8269 return iemRaiseDeviceNotAvailable(pIemCpu); \
8270 } while (0)
8271#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8272 do { \
8273 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8274 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8275 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8276 return iemRaiseUndefinedOpcode(pIemCpu); \
8277 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8278 return iemRaiseDeviceNotAvailable(pIemCpu); \
8279 } while (0)
8280#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8281 do { \
8282 if (pIemCpu->uCpl != 0) \
8283 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8284 } while (0)
8285
8286
8287#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8288#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8289#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8290#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8291#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8292#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8293#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8294 uint32_t a_Name; \
8295 uint32_t *a_pName = &a_Name
8296#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8297 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8298
8299#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8300#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8301
8302#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8303#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8304#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8305#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8306#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8307#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8308#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8309#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8310#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8311#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8312#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8313#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8314#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8315#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8316#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8317#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8318#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8319#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8320#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8321#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8322#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8323#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8324#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8325#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8326#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8327#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8328#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8329#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8330#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8331/** @note Not for IOPL or IF testing or modification. */
8332#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8333#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8334#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8335#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8336
8337#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8338#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8339#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8340#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8341#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8342#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8343#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8344#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8345#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8346#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8347#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8348 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8349
8350#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8351#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8352/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8353 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8354#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8355#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8356/** @note Not for IOPL or IF testing or modification. */
8357#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8358
8359#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8360#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8361#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8362 do { \
8363 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8364 *pu32Reg += (a_u32Value); \
8365 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8366 } while (0)
8367#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8368
8369#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8370#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8371#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8372 do { \
8373 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8374 *pu32Reg -= (a_u32Value); \
8375 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8376 } while (0)
8377#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8378
8379#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8380#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8381#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8382#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8383#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8384#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8385#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8386
8387#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8388#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8389#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8390#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8391
8392#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8393#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8394#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8395
8396#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8397#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8398
8399#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8400#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8401#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8402
8403#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8404#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8405#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8406
8407#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8408
8409#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8410
8411#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8412#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8413#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8414 do { \
8415 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8416 *pu32Reg &= (a_u32Value); \
8417 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8418 } while (0)
8419#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8420
8421#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8422#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8423#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8424 do { \
8425 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8426 *pu32Reg |= (a_u32Value); \
8427 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8428 } while (0)
8429#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8430
8431
8432/** @note Not for IOPL or IF modification. */
8433#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8434/** @note Not for IOPL or IF modification. */
8435#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8436/** @note Not for IOPL or IF modification. */
8437#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8438
8439#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8440
8441
8442#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8443 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8444#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8445 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8446#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8447 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8448#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8449 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8450#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8451 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8452#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8453 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8454#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8455 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8456
8457#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8458 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8459#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8460 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8461#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8462 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8463#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8464 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8465#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8466 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8467 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8468 } while (0)
8469#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8470 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8471 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8472 } while (0)
8473#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8474 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8475#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8476 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8477#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8478 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8479
8480#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8481 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8482#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8483 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8484#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8485 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8486
8487#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8488 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8489#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8490 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8491#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8492 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8493
8494#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8495 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8496#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8497 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8498#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8499 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8500
8501#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8502 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8503
8504#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8505 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8506#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8507 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8508#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8509 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8510#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8511 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8512
8513#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8514 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8515#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8516 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8517#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8518 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8519
8520#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8521 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8522#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8523 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8524
8525
8526
8527#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8528 do { \
8529 uint8_t u8Tmp; \
8530 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8531 (a_u16Dst) = u8Tmp; \
8532 } while (0)
8533#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8534 do { \
8535 uint8_t u8Tmp; \
8536 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8537 (a_u32Dst) = u8Tmp; \
8538 } while (0)
8539#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8540 do { \
8541 uint8_t u8Tmp; \
8542 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8543 (a_u64Dst) = u8Tmp; \
8544 } while (0)
8545#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8546 do { \
8547 uint16_t u16Tmp; \
8548 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8549 (a_u32Dst) = u16Tmp; \
8550 } while (0)
8551#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8552 do { \
8553 uint16_t u16Tmp; \
8554 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8555 (a_u64Dst) = u16Tmp; \
8556 } while (0)
8557#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8558 do { \
8559 uint32_t u32Tmp; \
8560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8561 (a_u64Dst) = u32Tmp; \
8562 } while (0)
8563
8564#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8565 do { \
8566 uint8_t u8Tmp; \
8567 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8568 (a_u16Dst) = (int8_t)u8Tmp; \
8569 } while (0)
8570#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8571 do { \
8572 uint8_t u8Tmp; \
8573 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8574 (a_u32Dst) = (int8_t)u8Tmp; \
8575 } while (0)
8576#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8577 do { \
8578 uint8_t u8Tmp; \
8579 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8580 (a_u64Dst) = (int8_t)u8Tmp; \
8581 } while (0)
8582#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8583 do { \
8584 uint16_t u16Tmp; \
8585 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8586 (a_u32Dst) = (int16_t)u16Tmp; \
8587 } while (0)
8588#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8589 do { \
8590 uint16_t u16Tmp; \
8591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8592 (a_u64Dst) = (int16_t)u16Tmp; \
8593 } while (0)
8594#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8595 do { \
8596 uint32_t u32Tmp; \
8597 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8598 (a_u64Dst) = (int32_t)u32Tmp; \
8599 } while (0)
8600
8601#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8602 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8603#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8604 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8605#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8606 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8607#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8608 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8609
8610#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8611 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8612#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8613 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8614#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8615 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8616#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8617 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8618
8619#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8620#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8621#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8622#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8623#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8624#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8625#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8626 do { \
8627 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8628 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8629 } while (0)
8630
8631#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8632 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8633#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8634 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8635
8636
8637#define IEM_MC_PUSH_U16(a_u16Value) \
8638 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8639#define IEM_MC_PUSH_U32(a_u32Value) \
8640 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8641#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8642 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8643#define IEM_MC_PUSH_U64(a_u64Value) \
8644 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8645
8646#define IEM_MC_POP_U16(a_pu16Value) \
8647 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8648#define IEM_MC_POP_U32(a_pu32Value) \
8649 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8650#define IEM_MC_POP_U64(a_pu64Value) \
8651 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8652
8653/** Maps guest memory for direct or bounce buffered access.
8654 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8655 * @remarks May return.
8656 */
8657#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8658 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8659
8660/** Maps guest memory for direct or bounce buffered access.
8661 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8662 * @remarks May return.
8663 */
8664#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8665 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8666
8667/** Commits the memory and unmaps the guest memory.
8668 * @remarks May return.
8669 */
8670#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8671 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
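
/*
 * Typical read-modify-write usage sketch (illustrative only; bRm is the
 * already fetched ModR/M byte and iReg the decoded register index, both
 * assumed here).  This is modelled on the memory forms of the binary ALU
 * instructions:
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint16_t *,  pu16Dst, 0);
 *      IEM_MC_ARG(uint16_t,    u16Src,  1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
 *      IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Src, iReg);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */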
8672
8673/** Commits the memory and unmaps the guest memory unless the FPU status word
8674 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8675 * would prevent the store instruction from storing anything.
8676 *
8677 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8678 * store, while \#P will not.
8679 *
8680 * @remarks May in theory return - for now.
8681 */
8682#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8683 do { \
8684 if ( !(a_u16FSW & X86_FSW_ES) \
8685 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8686 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8687 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8688 } while (0)
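
/*
 * Worked example for the condition above (illustrative only): say the
 * assembly helper returned a_u16FSW with X86_FSW_ES and X86_FSW_IE set
 * while FCW.IM is clear (invalid-operand exception unmasked).  Then
 * (FSW & (UE|OE|IE)) & ~(FCW & X86_FCW_MASK_ALL) is non-zero and the store
 * is skipped.  If X86_FSW_ES is clear, or the offending exception is
 * masked in the FCW, the value is committed as usual.
 */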
8689
8690/** Calculate efficient address from R/M. */
8691#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8692 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8693
8694#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8695#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8696#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8697#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8698#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8699#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8700#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8701
8702/**
8703 * Defers the rest of the instruction emulation to a C implementation routine
8704 * and returns, only taking the standard parameters.
8705 *
8706 * @param a_pfnCImpl The pointer to the C routine.
8707 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8708 */
8709#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8710
8711/**
8712 * Defers the rest of instruction emulation to a C implementation routine and
8713 * returns, taking one argument in addition to the standard ones.
8714 *
8715 * @param a_pfnCImpl The pointer to the C routine.
8716 * @param a0 The argument.
8717 */
8718#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8719
8720/**
8721 * Defers the rest of the instruction emulation to a C implementation routine
8722 * and returns, taking two arguments in addition to the standard ones.
8723 *
8724 * @param a_pfnCImpl The pointer to the C routine.
8725 * @param a0 The first extra argument.
8726 * @param a1 The second extra argument.
8727 */
8728#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8729
8730/**
8731 * Defers the rest of the instruction emulation to a C implementation routine
8732 * and returns, taking three arguments in addition to the standard ones.
8733 *
8734 * @param a_pfnCImpl The pointer to the C routine.
8735 * @param a0 The first extra argument.
8736 * @param a1 The second extra argument.
8737 * @param a2 The third extra argument.
8738 */
8739#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8740
8741/**
8742 * Defers the rest of the instruction emulation to a C implementation routine
8743 * and returns, taking four arguments in addition to the standard ones.
8744 *
8745 * @param a_pfnCImpl The pointer to the C routine.
8746 * @param a0 The first extra argument.
8747 * @param a1 The second extra argument.
8748 * @param a2 The third extra argument.
8749 * @param a3 The fourth extra argument.
8750 */
8751#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8752
8753/**
8754 * Defers the rest of the instruction emulation to a C implementation routine
8755 * and returns, taking five arguments in addition to the standard ones.
8756 *
8757 * @param a_pfnCImpl The pointer to the C routine.
8758 * @param a0 The first extra argument.
8759 * @param a1 The second extra argument.
8760 * @param a2 The third extra argument.
8761 * @param a3 The fourth extra argument.
8762 * @param a4 The fifth extra argument.
8763 */
8764#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8765
8766/**
8767 * Defers the entire instruction emulation to a C implementation routine and
8768 * returns, only taking the standard parameters.
8769 *
8770 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8771 *
8772 * @param a_pfnCImpl The pointer to the C routine.
8773 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8774 */
8775#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8776
8777/**
8778 * Defers the entire instruction emulation to a C implementation routine and
8779 * returns, taking one argument in addition to the standard ones.
8780 *
8781 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8782 *
8783 * @param a_pfnCImpl The pointer to the C routine.
8784 * @param a0 The argument.
8785 */
8786#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8787
8788/**
8789 * Defers the entire instruction emulation to a C implementation routine and
8790 * returns, taking two arguments in addition to the standard ones.
8791 *
8792 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8793 *
8794 * @param a_pfnCImpl The pointer to the C routine.
8795 * @param a0 The first extra argument.
8796 * @param a1 The second extra argument.
8797 */
8798#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8799
8800/**
8801 * Defers the entire instruction emulation to a C implementation routine and
8802 * returns, taking three arguments in addition to the standard ones.
8803 *
8804 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8805 *
8806 * @param a_pfnCImpl The pointer to the C routine.
8807 * @param a0 The first extra argument.
8808 * @param a1 The second extra argument.
8809 * @param a2 The third extra argument.
8810 */
8811#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
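
/*
 * Usage sketches (illustrative only): instructions implemented entirely in
 * C are wired up via the defer variants, e.g. the hlt decoder essentially
 * does
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *
 * whereas instructions that still need microcode for operand decoding end
 * an IEM_MC_BEGIN/IEM_MC_END block with IEM_MC_CALL_CIMPL_N, modelled here
 * on the lgdt decoding (bRm being the already fetched ModR/M byte):
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_ARG(uint8_t,       iEffSeg,                               0);
 *      IEM_MC_ARG(RTGCPTR,       GCPtrEffSrc,                           1);
 *      IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, pIemCpu->enmEffOpSize,   2);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
 *      IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *      IEM_MC_END();
 */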
8812
8813/**
8814 * Calls a FPU assembly implementation taking one visible argument.
8815 *
8816 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8817 * @param a0 The first extra argument.
8818 */
8819#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8820 do { \
8821 iemFpuPrepareUsage(pIemCpu); \
8822 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
8823 } while (0)
8824
8825/**
8826 * Calls a FPU assembly implementation taking two visible arguments.
8827 *
8828 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8829 * @param a0 The first extra argument.
8830 * @param a1 The second extra argument.
8831 */
8832#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
8833 do { \
8834 iemFpuPrepareUsage(pIemCpu); \
8835 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
8836 } while (0)
8837
8838/**
8839 * Calls a FPU assembly implementation taking three visible arguments.
8840 *
8841 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8842 * @param a0 The first extra argument.
8843 * @param a1 The second extra argument.
8844 * @param a2 The third extra argument.
8845 */
8846#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
8847 do { \
8848 iemFpuPrepareUsage(pIemCpu); \
8849 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
8850 } while (0)
8851
8852#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
8853 do { \
8854 (a_FpuData).FSW = (a_FSW); \
8855 (a_FpuData).r80Result = *(a_pr80Value); \
8856 } while (0)
8857
8858/** Pushes FPU result onto the stack. */
8859#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
8860 iemFpuPushResult(pIemCpu, &a_FpuData)
8861/** Pushes FPU result onto the stack and sets the FPUDP. */
8862#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
8863 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
8864
8865/** Replaces ST0 with the first result value and pushes the second value onto the FPU stack. */
8866#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
8867 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
8868
8869/** Stores FPU result in a stack register. */
8870#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
8871 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
8872/** Stores FPU result in a stack register and pops the stack. */
8873#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
8874 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
8875/** Stores FPU result in a stack register and sets the FPUDP. */
8876#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8877 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
8878/** Stores FPU result in a stack register, sets the FPUDP, and pops the
8879 * stack. */
8880#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8881 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
8882
8883/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
8884#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
8885 iemFpuUpdateOpcodeAndIp(pIemCpu)
8886/** Free a stack register (for FFREE and FFREEP). */
8887#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
8888 iemFpuStackFree(pIemCpu, a_iStReg)
8889/** Increment the FPU stack pointer. */
8890#define IEM_MC_FPU_STACK_INC_TOP() \
8891 iemFpuStackIncTop(pIemCpu)
8892/** Decrement the FPU stack pointer. */
8893#define IEM_MC_FPU_STACK_DEC_TOP() \
8894 iemFpuStackDecTop(pIemCpu)
8895
8896/** Updates the FSW, FOP, FPUIP, and FPUCS. */
8897#define IEM_MC_UPDATE_FSW(a_u16FSW) \
8898 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8899/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
8900#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
8901 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8902/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
8903#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8904 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8905/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
8906#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
8907 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
8908/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
8909 * stack. */
8910#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8911 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8912/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
8913#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
8914 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
8915
8916/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
8917#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
8918 iemFpuStackUnderflow(pIemCpu, a_iStDst)
8919/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8920 * stack. */
8921#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
8922 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
8923/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8924 * FPUDS. */
8925#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8926 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8927/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8928 * FPUDS. Pops stack. */
8929#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8930 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8931/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8932 * stack twice. */
8933#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
8934 iemFpuStackUnderflowThenPopPop(pIemCpu)
8935/** Raises a FPU stack underflow exception for an instruction pushing a result
8936 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
8937#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
8938 iemFpuStackPushUnderflow(pIemCpu)
8939/** Raises a FPU stack underflow exception for an instruction pushing a result
8940 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
8941#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
8942 iemFpuStackPushUnderflowTwo(pIemCpu)
8943
8944/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8945 * FPUIP, FPUCS and FOP. */
8946#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
8947 iemFpuStackPushOverflow(pIemCpu)
8948/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8949 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
8950#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
8951 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
8952/** Indicates that we (might) have modified the FPU state. */
8953#define IEM_MC_USED_FPU() \
8954 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
8955
8956/**
8957 * Calls a MMX assembly implementation taking two visible arguments.
8958 *
8959 * @param a_pfnAImpl Pointer to the assembly MMX routine.
8960 * @param a0 The first extra argument.
8961 * @param a1 The second extra argument.
8962 */
8963#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
8964 do { \
8965 iemFpuPrepareUsage(pIemCpu); \
8966 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
8967 } while (0)
8968
8969/**
8970 * Calls a MMX assembly implementation taking three visible arguments.
8971 *
8972 * @param a_pfnAImpl Pointer to the assembly MMX routine.
8973 * @param a0 The first extra argument.
8974 * @param a1 The second extra argument.
8975 * @param a2 The third extra argument.
8976 */
8977#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
8978 do { \
8979 iemFpuPrepareUsage(pIemCpu); \
8980 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
8981 } while (0)
8982
8983
8984/**
8985 * Calls a SSE assembly implementation taking two visible arguments.
8986 *
8987 * @param a_pfnAImpl Pointer to the assembly SSE routine.
8988 * @param a0 The first extra argument.
8989 * @param a1 The second extra argument.
8990 */
8991#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
8992 do { \
8993 iemFpuPrepareUsageSse(pIemCpu); \
8994 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
8995 } while (0)
8996
8997/**
8998 * Calls a SSE assembly implementation taking three visible arguments.
8999 *
9000 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9001 * @param a0 The first extra argument.
9002 * @param a1 The second extra argument.
9003 * @param a2 The third extra argument.
9004 */
9005#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9006 do { \
9007 iemFpuPrepareUsageSse(pIemCpu); \
9008 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9009 } while (0)
9010
9011
9012/** @note Not for IOPL or IF testing. */
9013#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9014/** @note Not for IOPL or IF testing. */
9015#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9016/** @note Not for IOPL or IF testing. */
9017#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9018/** @note Not for IOPL or IF testing. */
9019#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9020/** @note Not for IOPL or IF testing. */
9021#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9022 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9023 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9024/** @note Not for IOPL or IF testing. */
9025#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9026 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9027 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9028/** @note Not for IOPL or IF testing. */
9029#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9030 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9031 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9032 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9033/** @note Not for IOPL or IF testing. */
9034#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9035 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9036 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9037 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9038#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9039#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9040#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9041/** @note Not for IOPL or IF testing. */
9042#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9043 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9044 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9045/** @note Not for IOPL or IF testing. */
9046#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9047 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9048 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9049/** @note Not for IOPL or IF testing. */
9050#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9051 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9052 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9053/** @note Not for IOPL or IF testing. */
9054#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9055 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9056 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9057/** @note Not for IOPL or IF testing. */
9058#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9059 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9060 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9061/** @note Not for IOPL or IF testing. */
9062#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9063 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9064 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9065#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9066#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9067#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9068 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9069#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9070 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9071#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9072 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9073#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9074 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9075#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9076 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9077#define IEM_MC_IF_FCW_IM() \
9078 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9079
9080#define IEM_MC_ELSE() } else {
9081#define IEM_MC_ENDIF() } do {} while (0)
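
/*
 * The IEM_MC_IF_* macros expand to plain C if/else statements, so the
 * decoder writes them with braces around the nested macros.  A sketch
 * modelled on the je/jz Jb decoding (illustrative only; i8Imm is the
 * already fetched displacement):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 */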
9082
9083/** @} */
9084
9085
9086/** @name Opcode Debug Helpers.
9087 * @{
9088 */
9089#ifdef DEBUG
9090# define IEMOP_MNEMONIC(a_szMnemonic) \
9091 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9092 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9093# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9094 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9095 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9096#else
9097# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9098# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9099#endif
9100
9101/** @} */
9102
9103
9104/** @name Opcode Helpers.
9105 * @{
9106 */
9107
9108/** The instruction raises an \#UD in real and V8086 mode. */
9109#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9110 do \
9111 { \
9112 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9113 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9114 } while (0)
9115
9116/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9117 * lock prefixed.
9118 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9119#define IEMOP_HLP_NO_LOCK_PREFIX() \
9120 do \
9121 { \
9122 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9123 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9124 } while (0)
9125
9126/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9127 * 64-bit mode. */
9128#define IEMOP_HLP_NO_64BIT() \
9129 do \
9130 { \
9131 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9132 return IEMOP_RAISE_INVALID_OPCODE(); \
9133 } while (0)
9134
9135/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9136 * 64-bit mode. */
9137#define IEMOP_HLP_ONLY_64BIT() \
9138 do \
9139 { \
9140 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9141 return IEMOP_RAISE_INVALID_OPCODE(); \
9142 } while (0)
9143
9144/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9145#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9146 do \
9147 { \
9148 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9149 iemRecalEffOpSize64Default(pIemCpu); \
9150 } while (0)
9151
9152/** The instruction has 64-bit operand size if 64-bit mode. */
9153#define IEMOP_HLP_64BIT_OP_SIZE() \
9154 do \
9155 { \
9156 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9157 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9158 } while (0)
9159
9160/** Only a REX prefix immediately preceding the first opcode byte takes
9161 * effect. This macro helps ensure this as well as log bad guest code. */
9162#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9163 do \
9164 { \
9165 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9166 { \
9167 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9168 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9169 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9170 pIemCpu->uRexB = 0; \
9171 pIemCpu->uRexIndex = 0; \
9172 pIemCpu->uRexReg = 0; \
9173 iemRecalEffOpSize(pIemCpu); \
9174 } \
9175 } while (0)
9176
9177/**
9178 * Done decoding.
9179 */
9180#define IEMOP_HLP_DONE_DECODING() \
9181 do \
9182 { \
9183 /*nothing for now, maybe later... */ \
9184 } while (0)
9185
9186/**
9187 * Done decoding, raise \#UD exception if lock prefix present.
9188 */
9189#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9190 do \
9191 { \
9192 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9193 { /* likely */ } \
9194 else \
9195 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9196 } while (0)
9197#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9198 do \
9199 { \
9200 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9201 { /* likely */ } \
9202 else \
9203 { \
9204 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9205 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9206 } \
9207 } while (0)
9208#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9209 do \
9210 { \
9211 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9212 { /* likely */ } \
9213 else \
9214 { \
9215 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9216 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9217 } \
9218 } while (0)
9219/**
9220 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9221 * are present.
9222 */
9223#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9224 do \
9225 { \
9226 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9227 { /* likely */ } \
9228 else \
9229 return IEMOP_RAISE_INVALID_OPCODE(); \
9230 } while (0)
9231
9232
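/*
 * Illustrative sketch only (not compiled): roughly how a decoder function in
 * IEMAllInstructions.cpp.h strings the helpers above together.  The function
 * name and the micro-op body are placeholders, not a real opcode.
 *
 *     FNIEMOP_DEF(iemOp_example_Ev)
 *     {
 *         IEMOP_MNEMONIC("example Ev");               // decode logging (Log4)
 *         uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);  // fetch the ModR/M byte
 *         IEMOP_HLP_NO_64BIT();                       // say it's invalid in long mode
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();   // reject stray LOCK prefixes
 *         ...                                         // IEM_MC_* micro-op body
 *     }
 */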
9233/**
9234 * Calculates the effective address of a ModR/M memory operand.
9235 *
9236 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9237 *
9238 * @return Strict VBox status code.
9239 * @param pIemCpu The IEM per CPU data.
9240 * @param bRm The ModRM byte.
9241 * @param cbImm The size of any immediate following the
9242 * effective address opcode bytes. Important for
9243 * RIP relative addressing.
9244 * @param pGCPtrEff Where to return the effective address.
9245 */
9246static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9247{
9248 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9249 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9250#define SET_SS_DEF() \
9251 do \
9252 { \
9253 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9254 pIemCpu->iEffSeg = X86_SREG_SS; \
9255 } while (0)
9256
9257 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9258 {
9259/** @todo Check the effective address size crap! */
9260 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9261 {
9262 uint16_t u16EffAddr;
9263
9264 /* Handle the disp16 form with no registers first. */
9265 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9266 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9267 else
9268 {
9269 /* Get the displacement. */
9270 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9271 {
9272 case 0: u16EffAddr = 0; break;
9273 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9274 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9275 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9276 }
9277
9278 /* Add the base and index registers to the disp. */
9279 switch (bRm & X86_MODRM_RM_MASK)
9280 {
9281 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9282 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9283 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9284 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9285 case 4: u16EffAddr += pCtx->si; break;
9286 case 5: u16EffAddr += pCtx->di; break;
9287 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9288 case 7: u16EffAddr += pCtx->bx; break;
9289 }
9290 }
9291
9292 *pGCPtrEff = u16EffAddr;
9293 }
9294 else
9295 {
9296 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9297 uint32_t u32EffAddr;
9298
9299 /* Handle the disp32 form with no registers first. */
9300 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9301 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9302 else
9303 {
9304 /* Get the register (or SIB) value. */
9305 switch ((bRm & X86_MODRM_RM_MASK))
9306 {
9307 case 0: u32EffAddr = pCtx->eax; break;
9308 case 1: u32EffAddr = pCtx->ecx; break;
9309 case 2: u32EffAddr = pCtx->edx; break;
9310 case 3: u32EffAddr = pCtx->ebx; break;
9311 case 4: /* SIB */
9312 {
9313 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9314
9315 /* Get the index and scale it. */
9316 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9317 {
9318 case 0: u32EffAddr = pCtx->eax; break;
9319 case 1: u32EffAddr = pCtx->ecx; break;
9320 case 2: u32EffAddr = pCtx->edx; break;
9321 case 3: u32EffAddr = pCtx->ebx; break;
9322 case 4: u32EffAddr = 0; /*none */ break;
9323 case 5: u32EffAddr = pCtx->ebp; break;
9324 case 6: u32EffAddr = pCtx->esi; break;
9325 case 7: u32EffAddr = pCtx->edi; break;
9326 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9327 }
9328 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9329
9330 /* add base */
9331 switch (bSib & X86_SIB_BASE_MASK)
9332 {
9333 case 0: u32EffAddr += pCtx->eax; break;
9334 case 1: u32EffAddr += pCtx->ecx; break;
9335 case 2: u32EffAddr += pCtx->edx; break;
9336 case 3: u32EffAddr += pCtx->ebx; break;
9337 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9338 case 5:
9339 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9340 {
9341 u32EffAddr += pCtx->ebp;
9342 SET_SS_DEF();
9343 }
9344 else
9345 {
9346 uint32_t u32Disp;
9347 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9348 u32EffAddr += u32Disp;
9349 }
9350 break;
9351 case 6: u32EffAddr += pCtx->esi; break;
9352 case 7: u32EffAddr += pCtx->edi; break;
9353 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9354 }
9355 break;
9356 }
9357 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9358 case 6: u32EffAddr = pCtx->esi; break;
9359 case 7: u32EffAddr = pCtx->edi; break;
9360 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9361 }
9362
9363 /* Get and add the displacement. */
9364 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9365 {
9366 case 0:
9367 break;
9368 case 1:
9369 {
9370 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9371 u32EffAddr += i8Disp;
9372 break;
9373 }
9374 case 2:
9375 {
9376 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9377 u32EffAddr += u32Disp;
9378 break;
9379 }
9380 default:
9381 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9382 }
9383
9384 }
9385 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9386 *pGCPtrEff = u32EffAddr;
9387 else
9388 {
9389 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9390 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9391 }
9392 }
9393 }
9394 else
9395 {
9396 uint64_t u64EffAddr;
9397
9398 /* Handle the rip+disp32 form with no registers first. */
9399 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9400 {
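 /* RIP-relative addressing is relative to the address of the *next* instruction,
 so besides the disp32 we add the opcode bytes decoded so far (offOpcode) and
 the size of any immediate that still follows (cbImm). */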
9401 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9402 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9403 }
9404 else
9405 {
9406 /* Get the register (or SIB) value. */
9407 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9408 {
9409 case 0: u64EffAddr = pCtx->rax; break;
9410 case 1: u64EffAddr = pCtx->rcx; break;
9411 case 2: u64EffAddr = pCtx->rdx; break;
9412 case 3: u64EffAddr = pCtx->rbx; break;
9413 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9414 case 6: u64EffAddr = pCtx->rsi; break;
9415 case 7: u64EffAddr = pCtx->rdi; break;
9416 case 8: u64EffAddr = pCtx->r8; break;
9417 case 9: u64EffAddr = pCtx->r9; break;
9418 case 10: u64EffAddr = pCtx->r10; break;
9419 case 11: u64EffAddr = pCtx->r11; break;
9420 case 13: u64EffAddr = pCtx->r13; break;
9421 case 14: u64EffAddr = pCtx->r14; break;
9422 case 15: u64EffAddr = pCtx->r15; break;
9423 /* SIB */
9424 case 4:
9425 case 12:
9426 {
9427 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9428
9429 /* Get the index and scale it. */
9430 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9431 {
9432 case 0: u64EffAddr = pCtx->rax; break;
9433 case 1: u64EffAddr = pCtx->rcx; break;
9434 case 2: u64EffAddr = pCtx->rdx; break;
9435 case 3: u64EffAddr = pCtx->rbx; break;
9436 case 4: u64EffAddr = 0; /*none */ break;
9437 case 5: u64EffAddr = pCtx->rbp; break;
9438 case 6: u64EffAddr = pCtx->rsi; break;
9439 case 7: u64EffAddr = pCtx->rdi; break;
9440 case 8: u64EffAddr = pCtx->r8; break;
9441 case 9: u64EffAddr = pCtx->r9; break;
9442 case 10: u64EffAddr = pCtx->r10; break;
9443 case 11: u64EffAddr = pCtx->r11; break;
9444 case 12: u64EffAddr = pCtx->r12; break;
9445 case 13: u64EffAddr = pCtx->r13; break;
9446 case 14: u64EffAddr = pCtx->r14; break;
9447 case 15: u64EffAddr = pCtx->r15; break;
9448 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9449 }
9450 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9451
9452 /* add base */
9453 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9454 {
9455 case 0: u64EffAddr += pCtx->rax; break;
9456 case 1: u64EffAddr += pCtx->rcx; break;
9457 case 2: u64EffAddr += pCtx->rdx; break;
9458 case 3: u64EffAddr += pCtx->rbx; break;
9459 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9460 case 6: u64EffAddr += pCtx->rsi; break;
9461 case 7: u64EffAddr += pCtx->rdi; break;
9462 case 8: u64EffAddr += pCtx->r8; break;
9463 case 9: u64EffAddr += pCtx->r9; break;
9464 case 10: u64EffAddr += pCtx->r10; break;
9465 case 11: u64EffAddr += pCtx->r11; break;
9466 case 12: u64EffAddr += pCtx->r12; break;
9467 case 14: u64EffAddr += pCtx->r14; break;
9468 case 15: u64EffAddr += pCtx->r15; break;
9469 /* complicated encodings */
9470 case 5:
9471 case 13:
9472 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9473 {
9474 if (!pIemCpu->uRexB)
9475 {
9476 u64EffAddr += pCtx->rbp;
9477 SET_SS_DEF();
9478 }
9479 else
9480 u64EffAddr += pCtx->r13;
9481 }
9482 else
9483 {
9484 uint32_t u32Disp;
9485 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9486 u64EffAddr += (int32_t)u32Disp;
9487 }
9488 break;
9489 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9490 }
9491 break;
9492 }
9493 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9494 }
9495
9496 /* Get and add the displacement. */
9497 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9498 {
9499 case 0:
9500 break;
9501 case 1:
9502 {
9503 int8_t i8Disp;
9504 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9505 u64EffAddr += i8Disp;
9506 break;
9507 }
9508 case 2:
9509 {
9510 uint32_t u32Disp;
9511 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9512 u64EffAddr += (int32_t)u32Disp;
9513 break;
9514 }
9515 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9516 }
9517
9518 }
9519
9520 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9521 *pGCPtrEff = u64EffAddr;
9522 else
9523 {
9524 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9525 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9526 }
9527 }
9528
9529 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9530 return VINF_SUCCESS;
9531}
9532
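/*
 * Worked example (illustrative only): with a 32-bit effective address size,
 * bRm=0x44 gives mod=01 rm=100, so a SIB byte and a disp8 follow.  If that SIB
 * byte is 0x98 (scale field 2, index=ebx, base=eax) and the disp8 is 0x10, the
 * routine above returns eax + (ebx << 2) + 0x10, reading the register values
 * from the guest context (pCtx).
 */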
9533/** @} */
9534
9535
9536
9537/*
9538 * Include the instructions
9539 */
9540#include "IEMAllInstructions.cpp.h"
9541
9542
9543
9544
9545#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9546
9547/**
9548 * Sets up execution verification mode.
9549 */
9550static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9551{
9552 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9553 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9554
9555 /*
9556 * Always note down the address of the current instruction.
9557 */
9558 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9559 pIemCpu->uOldRip = pOrgCtx->rip;
9560
9561 /*
9562 * Enable verification and/or logging.
9563 */
9564 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9565 if ( fNewNoRem
9566 && ( 0
9567#if 0 /* auto enable on first paged protected mode interrupt */
9568 || ( pOrgCtx->eflags.Bits.u1IF
9569 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9570 && TRPMHasTrap(pVCpu)
9571 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9572#endif
9573#if 0
9574 || ( pOrgCtx->cs.Sel == 0x10
9575 && ( pOrgCtx->rip == 0x90119e3e
9576 || pOrgCtx->rip == 0x901d9810))
9577#endif
9578#if 0 /* Auto enable DSL - FPU stuff. */
9579 || ( pOrgCtx->cs.Sel == 0x10
9580 && (// pOrgCtx->rip == 0xc02ec07f
9581 //|| pOrgCtx->rip == 0xc02ec082
9582 //|| pOrgCtx->rip == 0xc02ec0c9
9583 0
9584 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9585#endif
9586#if 0 /* Auto enable DSL - fstp st0 stuff. */
9587 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9588#endif
9589#if 0
9590 || pOrgCtx->rip == 0x9022bb3a
9591#endif
9592#if 0
9593 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9594#endif
9595#if 0
9596 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9597 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9598#endif
9599#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9600 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9601 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9602 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9603#endif
9604#if 0 /* NT4SP1 - xadd early boot. */
9605 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9606#endif
9607#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9608 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9609#endif
9610#if 0 /* NT4SP1 - cmpxchg (AMD). */
9611 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9612#endif
9613#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9614 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9615#endif
9616#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9617 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9618
9619#endif
9620#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9621 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9622
9623#endif
9624#if 0 /* NT4SP1 - frstor [ecx] */
9625 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9626#endif
9627#if 0 /* xxxxxx - All long mode code. */
9628 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9629#endif
9630#if 0 /* rep movsq linux 3.7 64-bit boot. */
9631 || (pOrgCtx->rip == 0x0000000000100241)
9632#endif
9633#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9634 || (pOrgCtx->rip == 0x000000000215e240)
9635#endif
9636#if 0 /* DOS's size-overridden iret to v8086. */
9637 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9638#endif
9639 )
9640 )
9641 {
9642 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9643 RTLogFlags(NULL, "enabled");
9644 fNewNoRem = false;
9645 }
9646 if (fNewNoRem != pIemCpu->fNoRem)
9647 {
9648 pIemCpu->fNoRem = fNewNoRem;
9649 if (!fNewNoRem)
9650 {
9651 LogAlways(("Enabling verification mode!\n"));
9652 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9653 }
9654 else
9655 LogAlways(("Disabling verification mode!\n"));
9656 }
9657
9658 /*
9659 * Switch state.
9660 */
9661 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9662 {
9663 static CPUMCTX s_DebugCtx; /* Ugly! */
9664
9665 s_DebugCtx = *pOrgCtx;
9666 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9667 }
9668
9669 /*
9670 * See if there is an interrupt pending in TRPM and inject it if we can.
9671 */
9672 pIemCpu->uInjectCpl = UINT8_MAX;
9673 if ( pOrgCtx->eflags.Bits.u1IF
9674 && TRPMHasTrap(pVCpu)
9675 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9676 {
9677 uint8_t u8TrapNo;
9678 TRPMEVENT enmType;
9679 RTGCUINT uErrCode;
9680 RTGCPTR uCr2;
9681 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9682 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9683 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9684 TRPMResetTrap(pVCpu);
9685 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9686 }
9687
9688 /*
9689 * Reset the counters.
9690 */
9691 pIemCpu->cIOReads = 0;
9692 pIemCpu->cIOWrites = 0;
9693 pIemCpu->fIgnoreRaxRdx = false;
9694 pIemCpu->fOverlappingMovs = false;
9695 pIemCpu->fProblematicMemory = false;
9696 pIemCpu->fUndefinedEFlags = 0;
9697
9698 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9699 {
9700 /*
9701 * Free all verification records.
9702 */
9703 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9704 pIemCpu->pIemEvtRecHead = NULL;
9705 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9706 do
9707 {
9708 while (pEvtRec)
9709 {
9710 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9711 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9712 pIemCpu->pFreeEvtRec = pEvtRec;
9713 pEvtRec = pNext;
9714 }
9715 pEvtRec = pIemCpu->pOtherEvtRecHead;
9716 pIemCpu->pOtherEvtRecHead = NULL;
9717 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9718 } while (pEvtRec);
9719 }
9720}
9721
9722
9723/**
9724 * Allocate an event record.
9725 * @returns Pointer to a record.
9726 */
9727static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9728{
9729 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9730 return NULL;
9731
9732 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9733 if (pEvtRec)
9734 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9735 else
9736 {
9737 if (!pIemCpu->ppIemEvtRecNext)
9738 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9739
9740 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9741 if (!pEvtRec)
9742 return NULL;
9743 }
9744 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9745 pEvtRec->pNext = NULL;
9746 return pEvtRec;
9747}
9748
9749
9750/**
9751 * IOMMMIORead notification.
9752 */
9753VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9754{
9755 PVMCPU pVCpu = VMMGetCpu(pVM);
9756 if (!pVCpu)
9757 return;
9758 PIEMCPU pIemCpu = &pVCpu->iem.s;
9759 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9760 if (!pEvtRec)
9761 return;
9762 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9763 pEvtRec->u.RamRead.GCPhys = GCPhys;
9764 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9765 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9766 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9767}
9768
9769
9770/**
9771 * IOMMMIOWrite notification.
9772 */
9773VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9774{
9775 PVMCPU pVCpu = VMMGetCpu(pVM);
9776 if (!pVCpu)
9777 return;
9778 PIEMCPU pIemCpu = &pVCpu->iem.s;
9779 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9780 if (!pEvtRec)
9781 return;
9782 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9783 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9784 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9785 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9786 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9787 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9788 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9789 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9790 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9791}
9792
9793
9794/**
9795 * IOMIOPortRead notification.
9796 */
9797VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9798{
9799 PVMCPU pVCpu = VMMGetCpu(pVM);
9800 if (!pVCpu)
9801 return;
9802 PIEMCPU pIemCpu = &pVCpu->iem.s;
9803 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9804 if (!pEvtRec)
9805 return;
9806 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9807 pEvtRec->u.IOPortRead.Port = Port;
9808 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9809 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9810 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9811}
9812
9813/**
9814 * IOMIOPortWrite notification.
9815 */
9816VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9817{
9818 PVMCPU pVCpu = VMMGetCpu(pVM);
9819 if (!pVCpu)
9820 return;
9821 PIEMCPU pIemCpu = &pVCpu->iem.s;
9822 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9823 if (!pEvtRec)
9824 return;
9825 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9826 pEvtRec->u.IOPortWrite.Port = Port;
9827 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9828 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9829 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9830 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9831}
9832
9833
9834VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
9835{
9836 AssertFailed();
9837}
9838
9839
9840VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
9841{
9842 AssertFailed();
9843}
9844
9845
9846/**
9847 * Fakes and records an I/O port read.
9848 *
9849 * @returns VINF_SUCCESS.
9850 * @param pIemCpu The IEM per CPU data.
9851 * @param Port The I/O port.
9852 * @param pu32Value Where to store the fake value.
9853 * @param cbValue The size of the access.
9854 */
9855static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
9856{
9857 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9858 if (pEvtRec)
9859 {
9860 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9861 pEvtRec->u.IOPortRead.Port = Port;
9862 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9863 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9864 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9865 }
9866 pIemCpu->cIOReads++;
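 /* The 0xcc fill pattern below is what lets iemVerifyWriteRecord (together with
 the cIOReads count) fend off memory stores done by INS using this faked input. */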
9867 *pu32Value = 0xcccccccc;
9868 return VINF_SUCCESS;
9869}
9870
9871
9872/**
9873 * Fakes and records an I/O port write.
9874 *
9875 * @returns VINF_SUCCESS.
9876 * @param pIemCpu The IEM per CPU data.
9877 * @param Port The I/O port.
9878 * @param u32Value The value being written.
9879 * @param cbValue The size of the access.
9880 */
9881static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9882{
9883 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9884 if (pEvtRec)
9885 {
9886 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9887 pEvtRec->u.IOPortWrite.Port = Port;
9888 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9889 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9890 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9891 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9892 }
9893 pIemCpu->cIOWrites++;
9894 return VINF_SUCCESS;
9895}
9896
9897
9898/**
9899 * Used to add extra details about a stub case.
9900 * @param pIemCpu The IEM per CPU state.
9901 */
9902static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
9903{
9904 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9905 PVM pVM = IEMCPU_TO_VM(pIemCpu);
9906 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9907 char szRegs[4096];
9908 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
9909 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
9910 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
9911 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
9912 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
9913 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
9914 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
9915 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
9916 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
9917 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
9918 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
9919 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
9920 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
9921 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
9922 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
9923 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
9924 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
9925 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
9926 " efer=%016VR{efer}\n"
9927 " pat=%016VR{pat}\n"
9928 " sf_mask=%016VR{sf_mask}\n"
9929 "krnl_gs_base=%016VR{krnl_gs_base}\n"
9930 " lstar=%016VR{lstar}\n"
9931 " star=%016VR{star} cstar=%016VR{cstar}\n"
9932 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
9933 );
9934
9935 char szInstr1[256];
9936 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
9937 DBGF_DISAS_FLAGS_DEFAULT_MODE,
9938 szInstr1, sizeof(szInstr1), NULL);
9939 char szInstr2[256];
9940 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
9941 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9942 szInstr2, sizeof(szInstr2), NULL);
9943
9944 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
9945}
9946
9947
9948/**
9949 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
9950 * dump to the assertion info.
9951 *
9952 * @param pEvtRec The record to dump.
9953 */
9954static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
9955{
9956 switch (pEvtRec->enmEvent)
9957 {
9958 case IEMVERIFYEVENT_IOPORT_READ:
9959 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
9960 pEvtRec->u.IOPortRead.Port,
9961 pEvtRec->u.IOPortRead.cbValue);
9962 break;
9963 case IEMVERIFYEVENT_IOPORT_WRITE:
9964 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
9965 pEvtRec->u.IOPortWrite.Port,
9966 pEvtRec->u.IOPortWrite.cbValue,
9967 pEvtRec->u.IOPortWrite.u32Value);
9968 break;
9969 case IEMVERIFYEVENT_RAM_READ:
9970 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
9971 pEvtRec->u.RamRead.GCPhys,
9972 pEvtRec->u.RamRead.cb);
9973 break;
9974 case IEMVERIFYEVENT_RAM_WRITE:
9975 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
9976 pEvtRec->u.RamWrite.GCPhys,
9977 pEvtRec->u.RamWrite.cb,
9978 (int)pEvtRec->u.RamWrite.cb,
9979 pEvtRec->u.RamWrite.ab);
9980 break;
9981 default:
9982 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
9983 break;
9984 }
9985}
9986
9987
9988/**
9989 * Raises an assertion on the specified records, showing the given message with
9990 * the record dumps attached.
9991 *
9992 * @param pIemCpu The IEM per CPU data.
9993 * @param pEvtRec1 The first record.
9994 * @param pEvtRec2 The second record.
9995 * @param pszMsg The message explaining why we're asserting.
9996 */
9997static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
9998{
9999 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10000 iemVerifyAssertAddRecordDump(pEvtRec1);
10001 iemVerifyAssertAddRecordDump(pEvtRec2);
10002 iemVerifyAssertMsg2(pIemCpu);
10003 RTAssertPanic();
10004}
10005
10006
10007/**
10008 * Raises an assertion on the specified record, showing the given message with
10009 * a record dump attached.
10010 *
10011 * @param pIemCpu The IEM per CPU data.
10012 * @param pEvtRec The record to dump.
10013 * @param pszMsg The message explaining why we're asserting.
10014 */
10015static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10016{
10017 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10018 iemVerifyAssertAddRecordDump(pEvtRec);
10019 iemVerifyAssertMsg2(pIemCpu);
10020 RTAssertPanic();
10021}
10022
10023
10024/**
10025 * Verifies a write record.
10026 *
10027 * @param pIemCpu The IEM per CPU data.
10028 * @param pEvtRec The write record.
10029 * @param fRem Set if REM did the other execution. If clear,
10030 * it was HM.
10031 */
10032static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10033{
10034 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10035 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10036 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10037 if ( RT_FAILURE(rc)
10038 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10039 {
10040 /* fend off ins */
10041 if ( !pIemCpu->cIOReads
10042 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10043 || ( pEvtRec->u.RamWrite.cb != 1
10044 && pEvtRec->u.RamWrite.cb != 2
10045 && pEvtRec->u.RamWrite.cb != 4) )
10046 {
10047 /* fend off ROMs and MMIO */
10048 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10049 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10050 {
10051 /* fend off fxsave */
10052 if (pEvtRec->u.RamWrite.cb != 512)
10053 {
10054 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10055 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10056 RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
10057 RTAssertMsg2Add("%s: %.*Rhxs\n"
10058 "iem: %.*Rhxs\n",
10059 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10060 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10061 iemVerifyAssertAddRecordDump(pEvtRec);
10062 iemVerifyAssertMsg2(pIemCpu);
10063 RTAssertPanic();
10064 }
10065 }
10066 }
10067 }
10068
10069}
10070
10071/**
10072 * Performs the post-execution verification checks.
10073 */
10074static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10075{
10076 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10077 return;
10078
10079 /*
10080 * Switch back the state.
10081 */
10082 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10083 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10084 Assert(pOrgCtx != pDebugCtx);
10085 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10086
10087 /*
10088 * Execute the instruction in REM.
10089 */
10090 bool fRem = false;
10091 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10092 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10093 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10094#ifdef IEM_VERIFICATION_MODE_FULL_HM
10095 if ( HMIsEnabled(pVM)
10096 && pIemCpu->cIOReads == 0
10097 && pIemCpu->cIOWrites == 0
10098 && !pIemCpu->fProblematicMemory)
10099 {
10100 uint64_t uStartRip = pOrgCtx->rip;
10101 unsigned iLoops = 0;
10102 do
10103 {
10104 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10105 iLoops++;
10106 } while ( rc == VINF_SUCCESS
10107 || ( rc == VINF_EM_DBG_STEPPED
10108 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10109 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10110 || ( pOrgCtx->rip != pDebugCtx->rip
10111 && pIemCpu->uInjectCpl != UINT8_MAX
10112 && iLoops < 8) );
10113 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10114 rc = VINF_SUCCESS;
10115 }
10116#endif
10117 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10118 || rc == VINF_IOM_R3_IOPORT_READ
10119 || rc == VINF_IOM_R3_IOPORT_WRITE
10120 || rc == VINF_IOM_R3_MMIO_READ
10121 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10122 || rc == VINF_IOM_R3_MMIO_WRITE
10123 || rc == VINF_CPUM_R3_MSR_READ
10124 || rc == VINF_CPUM_R3_MSR_WRITE
10125 || rc == VINF_EM_RESCHEDULE
10126 )
10127 {
10128 EMRemLock(pVM);
10129 rc = REMR3EmulateInstruction(pVM, pVCpu);
10130 AssertRC(rc);
10131 EMRemUnlock(pVM);
10132 fRem = true;
10133 }
10134
10135 /*
10136 * Compare the register states.
10137 */
10138 unsigned cDiffs = 0;
10139 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10140 {
10141 //Log(("REM and IEM ends up with different registers!\n"));
10142 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10143
10144# define CHECK_FIELD(a_Field) \
10145 do \
10146 { \
10147 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10148 { \
10149 switch (sizeof(pOrgCtx->a_Field)) \
10150 { \
10151 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10152 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10153 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10154 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10155 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10156 } \
10157 cDiffs++; \
10158 } \
10159 } while (0)
10160# define CHECK_XSTATE_FIELD(a_Field) \
10161 do \
10162 { \
10163 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10164 { \
10165 switch (sizeof(pOrgCtx->a_Field)) \
10166 { \
10167 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10168 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10169 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10170 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10171 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10172 } \
10173 cDiffs++; \
10174 } \
10175 } while (0)
10176
10177# define CHECK_BIT_FIELD(a_Field) \
10178 do \
10179 { \
10180 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10181 { \
10182 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10183 cDiffs++; \
10184 } \
10185 } while (0)
10186
10187# define CHECK_SEL(a_Sel) \
10188 do \
10189 { \
10190 CHECK_FIELD(a_Sel.Sel); \
10191 CHECK_FIELD(a_Sel.Attr.u); \
10192 CHECK_FIELD(a_Sel.u64Base); \
10193 CHECK_FIELD(a_Sel.u32Limit); \
10194 CHECK_FIELD(a_Sel.fFlags); \
10195 } while (0)
10196
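/*
 * Illustrative only: a mismatch flagged by e.g. CHECK_FIELD(rip) against VT-x
 * shows up in the assertion text roughly as (values made up):
 *
 *          rip differs - iem=000000008013ec28 - vmx=000000008013ec2b
 */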
10197 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10198 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10199
10200#if 1 /* The recompiler doesn't update these the intel way. */
10201 if (fRem)
10202 {
10203 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10204 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10205 pOrgXState->x87.CS = pDebugXState->x87.CS;
10206 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10207 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10208 pOrgXState->x87.DS = pDebugXState->x87.DS;
10209 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10210 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10211 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10212 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10213 }
10214#endif
10215 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10216 {
10217 RTAssertMsg2Weak(" the FPU state differs\n");
10218 cDiffs++;
10219 CHECK_XSTATE_FIELD(x87.FCW);
10220 CHECK_XSTATE_FIELD(x87.FSW);
10221 CHECK_XSTATE_FIELD(x87.FTW);
10222 CHECK_XSTATE_FIELD(x87.FOP);
10223 CHECK_XSTATE_FIELD(x87.FPUIP);
10224 CHECK_XSTATE_FIELD(x87.CS);
10225 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10226 CHECK_XSTATE_FIELD(x87.FPUDP);
10227 CHECK_XSTATE_FIELD(x87.DS);
10228 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10229 CHECK_XSTATE_FIELD(x87.MXCSR);
10230 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10231 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10232 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10233 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10234 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10235 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10236 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10237 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10238 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10239 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10240 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10241 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10242 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10243 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10244 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10245 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10246 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10247 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10248 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10249 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10250 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10251 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10252 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10253 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10254 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10255 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10256 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10257 }
10258 CHECK_FIELD(rip);
10259 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10260 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10261 {
10262 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10263 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10264 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10265 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10266 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10267 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10268 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10269 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10270 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10271 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10272 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10273 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10274 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10275 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10276 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10277 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10278 if (0 && !fRem) /** @todo debug the occasional cleared RF flag when running against VT-x. */
10279 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10280 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10281 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10282 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10283 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10284 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10285 }
10286
10287 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10288 CHECK_FIELD(rax);
10289 CHECK_FIELD(rcx);
10290 if (!pIemCpu->fIgnoreRaxRdx)
10291 CHECK_FIELD(rdx);
10292 CHECK_FIELD(rbx);
10293 CHECK_FIELD(rsp);
10294 CHECK_FIELD(rbp);
10295 CHECK_FIELD(rsi);
10296 CHECK_FIELD(rdi);
10297 CHECK_FIELD(r8);
10298 CHECK_FIELD(r9);
10299 CHECK_FIELD(r10);
10300 CHECK_FIELD(r11);
10301 CHECK_FIELD(r12);
10302 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
10303 CHECK_SEL(cs);
10304 CHECK_SEL(ss);
10305 CHECK_SEL(ds);
10306 CHECK_SEL(es);
10307 CHECK_SEL(fs);
10308 CHECK_SEL(gs);
10309 CHECK_FIELD(cr0);
10310
10311 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10312 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10313 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
10314 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10315 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10316 {
10317 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10318 { /* ignore */ }
10319 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10320 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10321 && fRem)
10322 { /* ignore */ }
10323 else
10324 CHECK_FIELD(cr2);
10325 }
10326 CHECK_FIELD(cr3);
10327 CHECK_FIELD(cr4);
10328 CHECK_FIELD(dr[0]);
10329 CHECK_FIELD(dr[1]);
10330 CHECK_FIELD(dr[2]);
10331 CHECK_FIELD(dr[3]);
10332 CHECK_FIELD(dr[6]);
10333 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10334 CHECK_FIELD(dr[7]);
10335 CHECK_FIELD(gdtr.cbGdt);
10336 CHECK_FIELD(gdtr.pGdt);
10337 CHECK_FIELD(idtr.cbIdt);
10338 CHECK_FIELD(idtr.pIdt);
10339 CHECK_SEL(ldtr);
10340 CHECK_SEL(tr);
10341 CHECK_FIELD(SysEnter.cs);
10342 CHECK_FIELD(SysEnter.eip);
10343 CHECK_FIELD(SysEnter.esp);
10344 CHECK_FIELD(msrEFER);
10345 CHECK_FIELD(msrSTAR);
10346 CHECK_FIELD(msrPAT);
10347 CHECK_FIELD(msrLSTAR);
10348 CHECK_FIELD(msrCSTAR);
10349 CHECK_FIELD(msrSFMASK);
10350 CHECK_FIELD(msrKERNELGSBASE);
10351
10352 if (cDiffs != 0)
10353 {
10354 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10355 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10356 iemVerifyAssertMsg2(pIemCpu);
10357 RTAssertPanic();
10358 }
10359# undef CHECK_FIELD
10360# undef CHECK_BIT_FIELD
10361 }
10362
10363 /*
10364 * If the register state compared fine, check the verification event
10365 * records.
10366 */
10367 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10368 {
10369 /*
10370 * Compare verification event records.
10371 * - I/O port accesses should be a 1:1 match.
10372 */
10373 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10374 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10375 while (pIemRec && pOtherRec)
10376 {
10377 /* Since we might miss RAM writes and reads, ignore unmatched reads and
10378 only verify that any extra written memory matches what is actually in RAM. */
10379 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10380 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10381 && pIemRec->pNext)
10382 {
10383 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10384 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10385 pIemRec = pIemRec->pNext;
10386 }
10387
10388 /* Do the compare. */
10389 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10390 {
10391 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10392 break;
10393 }
10394 bool fEquals;
10395 switch (pIemRec->enmEvent)
10396 {
10397 case IEMVERIFYEVENT_IOPORT_READ:
10398 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10399 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10400 break;
10401 case IEMVERIFYEVENT_IOPORT_WRITE:
10402 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10403 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10404 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10405 break;
10406 case IEMVERIFYEVENT_RAM_READ:
10407 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10408 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10409 break;
10410 case IEMVERIFYEVENT_RAM_WRITE:
10411 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10412 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10413 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10414 break;
10415 default:
10416 fEquals = false;
10417 break;
10418 }
10419 if (!fEquals)
10420 {
10421 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10422 break;
10423 }
10424
10425 /* advance */
10426 pIemRec = pIemRec->pNext;
10427 pOtherRec = pOtherRec->pNext;
10428 }
10429
10430 /* Ignore extra writes and reads. */
10431 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10432 {
10433 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10434 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10435 pIemRec = pIemRec->pNext;
10436 }
10437 if (pIemRec != NULL)
10438 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10439 else if (pOtherRec != NULL)
10440 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10441 }
10442 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10443}
10444
10445#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10446
10447/* stubs */
10448static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10449{
10450 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10451 return VERR_INTERNAL_ERROR;
10452}
10453
10454static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10455{
10456 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10457 return VERR_INTERNAL_ERROR;
10458}
10459
10460#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10461
10462
10463#ifdef LOG_ENABLED
10464/**
10465 * Logs the current instruction.
10466 * @param pVCpu The cross context virtual CPU structure of the caller.
10467 * @param pCtx The current CPU context.
10468 * @param fSameCtx Set if we have the same context information as the VMM,
10469 * clear if we may have already executed an instruction in
10470 * our debug context. When clear, we assume IEMCPU holds
10471 * valid CPU mode info.
10472 */
10473static void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10474{
10475# ifdef IN_RING3
10476 if (LogIs2Enabled())
10477 {
10478 char szInstr[256];
10479 uint32_t cbInstr = 0;
10480 if (fSameCtx)
10481 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10482 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10483 szInstr, sizeof(szInstr), &cbInstr);
10484 else
10485 {
10486 uint32_t fFlags = 0;
10487 switch (pVCpu->iem.s.enmCpuMode)
10488 {
10489 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10490 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10491 case IEMMODE_16BIT:
10492 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10493 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10494 else
10495 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10496 break;
10497 }
10498 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10499 szInstr, sizeof(szInstr), &cbInstr);
10500 }
10501
10502 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10503 Log2(("****\n"
10504 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10505 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10506 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10507 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10508 " %s\n"
10509 ,
10510 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10511 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10512 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10513 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10514 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10515 szInstr));
10516
10517 if (LogIs3Enabled())
10518 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10519 }
10520 else
10521# endif
10522 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10523 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10524}
10525#endif
10526
10527
10528/**
10529 * Makes status code adjustments (pass up from I/O and access handlers)
10530 * and maintains statistics.
10531 *
10532 * @returns Strict VBox status code to pass up.
10533 * @param pIemCpu The IEM per CPU data.
10534 * @param rcStrict The status from executing an instruction.
10535 */
10536DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10537{
10538 if (rcStrict != VINF_SUCCESS)
10539 {
10540 if (RT_SUCCESS(rcStrict))
10541 {
10542 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10543 || rcStrict == VINF_IOM_R3_IOPORT_READ
10544 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10545 || rcStrict == VINF_IOM_R3_MMIO_READ
10546 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10547 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10548 || rcStrict == VINF_CPUM_R3_MSR_READ
10549 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10550 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10551 int32_t const rcPassUp = pIemCpu->rcPassUp;
10552 if (rcPassUp == VINF_SUCCESS)
10553 pIemCpu->cRetInfStatuses++;
10554 else if ( rcPassUp < VINF_EM_FIRST
10555 || rcPassUp > VINF_EM_LAST
10556 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10557 {
10558 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10559 pIemCpu->cRetPassUpStatus++;
10560 rcStrict = rcPassUp;
10561 }
10562 else
10563 {
10564 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10565 pIemCpu->cRetInfStatuses++;
10566 }
10567 }
10568 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10569 pIemCpu->cRetAspectNotImplemented++;
10570 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10571 pIemCpu->cRetInstrNotImplemented++;
10572#ifdef IEM_VERIFICATION_MODE_FULL
10573 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10574 rcStrict = VINF_SUCCESS;
10575#endif
10576 else
10577 pIemCpu->cRetErrStatuses++;
10578 }
10579 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10580 {
10581 pIemCpu->cRetPassUpStatus++;
10582 rcStrict = pIemCpu->rcPassUp;
10583 }
10584
10585 return rcStrict;
10586}
10587
10588
10589/**
10590 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10591 * IEMExecOneWithPrefetchedByPC.
10592 *
10593 * @return Strict VBox status code.
10594 * @param pVCpu The current virtual CPU.
10595 * @param pIemCpu The IEM per CPU data.
10596 * @param fExecuteInhibit If set, execute the instruction following CLI,
10597 * POP SS and MOV SS,GR.
10598 */
10599DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10600{
10601 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10602 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10603 if (rcStrict == VINF_SUCCESS)
10604 pIemCpu->cInstructions++;
10605 if (pIemCpu->cActiveMappings > 0)
10606 iemMemRollback(pIemCpu);
10607//#ifdef DEBUG
10608// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10609//#endif
10610
10611 /* Execute the next instruction as well if a cli, pop ss or
10612 mov ss, Gr has just completed successfully. */
10613 if ( fExecuteInhibit
10614 && rcStrict == VINF_SUCCESS
10615 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10616 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10617 {
10618 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10619 if (rcStrict == VINF_SUCCESS)
10620 {
10621# ifdef LOG_ENABLED
10622 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10623# endif
10624 IEM_OPCODE_GET_NEXT_U8(&b);
10625 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10626 if (rcStrict == VINF_SUCCESS)
10627 pIemCpu->cInstructions++;
10628 if (pIemCpu->cActiveMappings > 0)
10629 iemMemRollback(pIemCpu);
10630 }
10631 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10632 }
10633
10634 /*
10635 * Return value fiddling, statistics and sanity assertions.
10636 */
10637 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10638
10639 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10640 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10641#if defined(IEM_VERIFICATION_MODE_FULL)
10642 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10643 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10644 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10645 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10646#endif
10647 return rcStrict;
10648}
10649
10650
10651#ifdef IN_RC
10652/**
10653 * Re-enters raw-mode or ensure we return to ring-3.
10654 *
10655 * @returns rcStrict, maybe modified.
10656 * @param pIemCpu The IEM CPU structure.
10657 * @param pVCpu The cross context virtual CPU structure of the caller.
10658 * @param pCtx The current CPU context.
10659 * @param rcStrict The status code returned by the interpreter.
10660 */
10661DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10662{
10663 if (!pIemCpu->fInPatchCode)
10664 CPUMRawEnter(pVCpu);
10665 return rcStrict;
10666}
10667#endif
10668
10669
10670/**
10671 * Execute one instruction.
10672 *
10673 * @return Strict VBox status code.
10674 * @param pVCpu The current virtual CPU.
10675 */
10676VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10677{
10678 PIEMCPU pIemCpu = &pVCpu->iem.s;
10679
10680#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10681 iemExecVerificationModeSetup(pIemCpu);
10682#endif
10683#ifdef LOG_ENABLED
10684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10685 iemLogCurInstr(pVCpu, pCtx, true);
10686#endif
10687
10688 /*
10689 * Do the decoding and emulation.
10690 */
10691 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10692 if (rcStrict == VINF_SUCCESS)
10693 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10694
10695#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10696 /*
10697 * Assert some sanity.
10698 */
10699 iemExecVerificationModeCheck(pIemCpu);
10700#endif
10701#ifdef IN_RC
10702 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10703#endif
10704 if (rcStrict != VINF_SUCCESS)
10705 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10706 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10707 return rcStrict;
10708}
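/*
 * Minimal caller-side sketch (illustrative, not part of the build): how an
 * execution loop might drive IEMExecOne and treat the strict status code.  The
 * surrounding loop and any ring-3 deferral logic are assumed, not shown.
 *
 *     for (;;)
 *     {
 *         VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *         if (rcStrict == VINF_SUCCESS)
 *             continue;       // instruction fully emulated, go again
 *         return rcStrict;    // informational status (e.g. VINF_IOM_R3_IOPORT_READ) or error
 *     }
 */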
10709
10710
10711VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10712{
10713 PIEMCPU pIemCpu = &pVCpu->iem.s;
10714 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10715 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10716
10717 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10718 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10719 if (rcStrict == VINF_SUCCESS)
10720 {
10721 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10722 if (pcbWritten)
10723 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10724 }
10725
10726#ifdef IN_RC
10727 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10728#endif
10729 return rcStrict;
10730}
10731
10732
10733VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10734 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10735{
10736 PIEMCPU pIemCpu = &pVCpu->iem.s;
10737 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10738 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10739
10740 VBOXSTRICTRC rcStrict;
10741 if ( cbOpcodeBytes
10742 && pCtx->rip == OpcodeBytesPC)
10743 {
10744 iemInitDecoder(pIemCpu, false);
10745 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10746 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10747 rcStrict = VINF_SUCCESS;
10748 }
10749 else
10750 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10751 if (rcStrict == VINF_SUCCESS)
10752 {
10753 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10754 }
10755
10756#ifdef IN_RC
10757 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10758#endif
10759 return rcStrict;
10760}
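/*
 * Hypothetical usage sketch (not compiled): a caller that already holds the
 * opcode bytes for the instruction at CS:RIP - say from a nested-paging or
 * MMIO exit - can hand them in and skip the guest memory fetch.  abInstr and
 * cbInstr are assumed to come from that caller and are not defined here.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx),
 *                                                          pCtx->rip, abInstr, cbInstr);
 */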
10761
10762
10763VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10764{
10765 PIEMCPU pIemCpu = &pVCpu->iem.s;
10766 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10767 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10768
10769 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10770 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10771 if (rcStrict == VINF_SUCCESS)
10772 {
10773 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10774 if (pcbWritten)
10775 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10776 }
10777
10778#ifdef IN_RC
10779 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10780#endif
10781 return rcStrict;
10782}
10783
10784
10785VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10786 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10787{
10788 PIEMCPU pIemCpu = &pVCpu->iem.s;
10789 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10790 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10791
10792 VBOXSTRICTRC rcStrict;
10793 if ( cbOpcodeBytes
10794 && pCtx->rip == OpcodeBytesPC)
10795 {
10796 iemInitDecoder(pIemCpu, true);
10797 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10798 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10799 rcStrict = VINF_SUCCESS;
10800 }
10801 else
10802 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10803 if (rcStrict == VINF_SUCCESS)
10804 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10805
10806#ifdef IN_RC
10807 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10808#endif
10809 return rcStrict;
10810}
10811
10812
10813VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
10814{
10815 PIEMCPU pIemCpu = &pVCpu->iem.s;
10816
10817 /*
10818 * See if there is an interrupt pending in TRPM and inject it if we can.
10819 */
10820#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
10821 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10822# ifdef IEM_VERIFICATION_MODE_FULL
10823 pIemCpu->uInjectCpl = UINT8_MAX;
10824# endif
10825 if ( pCtx->eflags.Bits.u1IF
10826 && TRPMHasTrap(pVCpu)
10827 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
10828 {
10829 uint8_t u8TrapNo;
10830 TRPMEVENT enmType;
10831 RTGCUINT uErrCode;
10832 RTGCPTR uCr2;
10833 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
10834 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10835 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10836 TRPMResetTrap(pVCpu);
10837 }
10838#else
10839 iemExecVerificationModeSetup(pIemCpu);
10840 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10841#endif
10842
10843 /*
10844 * Log the state.
10845 */
10846#ifdef LOG_ENABLED
10847 iemLogCurInstr(pVCpu, pCtx, true);
10848#endif
10849
10850 /*
10851 * Do the decoding and emulation.
10852 */
10853 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10854 if (rcStrict == VINF_SUCCESS)
10855 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10856
10857#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10858 /*
10859 * Assert some sanity.
10860 */
10861 iemExecVerificationModeCheck(pIemCpu);
10862#endif
10863
10864 /*
10865 * Maybe re-enter raw-mode and log.
10866 */
10867#ifdef IN_RC
10868 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10869#endif
10870 if (rcStrict != VINF_SUCCESS)
10871 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10872 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10873 return rcStrict;
10874}
10875
10876
10877
10878/**
10879 * Injects a trap, fault, abort, software interrupt or external interrupt.
10880 *
10881 * The parameter list matches TRPMQueryTrapAll pretty closely.
10882 *
10883 * @returns Strict VBox status code.
10884 * @param pVCpu The current virtual CPU.
10885 * @param u8TrapNo The trap number.
10886 * @param enmType What type is it (trap/fault/abort), software
10887 * interrupt or hardware interrupt.
10888 * @param uErrCode The error code if applicable.
10889 * @param uCr2 The CR2 value if applicable.
10890 * @param cbInstr The instruction length (only relevant for
10891 * software interrupts).
10892 */
10893VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10894 uint8_t cbInstr)
10895{
10896 iemInitDecoder(&pVCpu->iem.s, false);
10897#ifdef DBGFTRACE_ENABLED
10898 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10899 u8TrapNo, enmType, uErrCode, uCr2);
10900#endif
10901
10902 uint32_t fFlags;
10903 switch (enmType)
10904 {
10905 case TRPM_HARDWARE_INT:
10906 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10907 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10908 uErrCode = uCr2 = 0;
10909 break;
10910
10911 case TRPM_SOFTWARE_INT:
10912 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10913 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10914 uErrCode = uCr2 = 0;
10915 break;
10916
10917 case TRPM_TRAP:
10918 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10919 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10920 if (u8TrapNo == X86_XCPT_PF)
10921 fFlags |= IEM_XCPT_FLAGS_CR2;
10922 switch (u8TrapNo)
10923 {
10924 case X86_XCPT_DF:
10925 case X86_XCPT_TS:
10926 case X86_XCPT_NP:
10927 case X86_XCPT_SS:
10928 case X86_XCPT_PF:
10929 case X86_XCPT_AC:
10930 fFlags |= IEM_XCPT_FLAGS_ERR;
10931 break;
10932
10933 case X86_XCPT_NMI:
10934 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
10935 break;
10936 }
10937 break;
10938
10939 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10940 }
10941
10942 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10943}
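/*
 * Illustrative sketch, not part of the original source: a minimal caller that
 * reflects a guest page fault (#PF) straight into IEMInjectTrap.  The uErr and
 * GCPtrFault values are whatever the caller has established; IEMInjectTrpmEvent
 * below shows the TRPM-driven variant of the same call.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleReflectPageFault(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrFault)
{
    /* A #PF is a CPU exception, so TRPM_TRAP is the matching event type; the
       X86_XCPT_PF special case above makes IEMInjectTrap pick up CR2 as well. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, (uint16_t)uErr, GCPtrFault, 0 /* cbInstr */);
}
#endif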
10944
10945
10946/**
10947 * Injects the active TRPM event.
10948 *
10949 * @returns Strict VBox status code.
10950 * @param pVCpu Pointer to the VMCPU.
10951 */
10952VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
10953{
10954#ifndef IEM_IMPLEMENTS_TASKSWITCH
10955 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10956#else
10957 uint8_t u8TrapNo;
10958 TRPMEVENT enmType;
10959 RTGCUINT uErrCode;
10960 RTGCUINTPTR uCr2;
10961 uint8_t cbInstr;
10962 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
10963 if (RT_FAILURE(rc))
10964 return rc;
10965
10966 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10967
10968 /** @todo Are there any other codes that imply the event was successfully
10969 * delivered to the guest? See @bugref{6607}. */
10970 if ( rcStrict == VINF_SUCCESS
10971 || rcStrict == VINF_IEM_RAISED_XCPT)
10972 {
10973 TRPMResetTrap(pVCpu);
10974 }
10975 return rcStrict;
10976#endif
10977}
10978
10979
10980VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10981{
10982 return VERR_NOT_IMPLEMENTED;
10983}
10984
10985
10986VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10987{
10988 return VERR_NOT_IMPLEMENTED;
10989}
10990
10991
10992#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10993/**
10994 * Executes an IRET instruction with default operand size.
10995 *
10996 * This is for PATM.
10997 *
10998 * @returns VBox status code.
10999 * @param pVCpu The current virtual CPU.
11000 * @param pCtxCore The register frame.
11001 */
11002VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11003{
11004 PIEMCPU pIemCpu = &pVCpu->iem.s;
11005 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11006
11007 iemCtxCoreToCtx(pCtx, pCtxCore);
11008    iemInitDecoder(pIemCpu, false);
11009 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11010 if (rcStrict == VINF_SUCCESS)
11011 iemCtxToCtxCore(pCtxCore, pCtx);
11012 else
11013 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11014                 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11015 return rcStrict;
11016}
11017#endif
11018
11019
11020/**
11021 * Macro used by the IEMExec* method to check the given instruction length.
11022 *
11023 * Will return on failure!
11024 *
11025 * @param a_cbInstr The given instruction length.
11026 * @param a_cbMin The minimum length.
11027 */
11028#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11029 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11030 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
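/* The unsigned subtraction in the assertion folds both bounds into a single
   compare: any a_cbInstr below a_cbMin wraps around to a huge value, and any
   a_cbInstr above 15 yields a difference larger than 15 - a_cbMin, so the one
   <= test is equivalent to a_cbMin <= a_cbInstr <= 15 (the x86 maximum
   instruction length).  E.g. with a_cbMin = 2: cbInstr = 1 gives 0xffffffff
   (rejected), cbInstr = 16 gives 14 > 13 (rejected), cbInstr = 2..15 gives
   0..13 (accepted). */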
11031
11032
11033/**
11034 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11035 *
11036 * This API ASSUMES that the caller has already verified that the guest code is
11037 * allowed to access the I/O port. (The I/O port is in the DX register in the
11038 * guest state.)
11039 *
11040 * @returns Strict VBox status code.
11041 * @param pVCpu The cross context per virtual CPU structure.
11042 * @param cbValue The size of the I/O port access (1, 2, or 4).
11043 * @param enmAddrMode The addressing mode.
11044 * @param fRepPrefix Indicates whether a repeat prefix is used
11045 * (doesn't matter which for this instruction).
11046 * @param cbInstr The instruction length in bytes.
11047 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
11048 */
11049VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11050 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11051{
11052 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11053 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11054
11055 /*
11056 * State init.
11057 */
11058 PIEMCPU pIemCpu = &pVCpu->iem.s;
11059 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11060
11061 /*
11062 * Switch orgy for getting to the right handler.
11063 */
11064 VBOXSTRICTRC rcStrict;
11065 if (fRepPrefix)
11066 {
11067 switch (enmAddrMode)
11068 {
11069 case IEMMODE_16BIT:
11070 switch (cbValue)
11071 {
11072 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11073 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11074 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11075 default:
11076 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11077 }
11078 break;
11079
11080 case IEMMODE_32BIT:
11081 switch (cbValue)
11082 {
11083 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11084 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11085 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11086 default:
11087 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11088 }
11089 break;
11090
11091 case IEMMODE_64BIT:
11092 switch (cbValue)
11093 {
11094 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11095 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11096 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11097 default:
11098 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11099 }
11100 break;
11101
11102 default:
11103 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11104 }
11105 }
11106 else
11107 {
11108 switch (enmAddrMode)
11109 {
11110 case IEMMODE_16BIT:
11111 switch (cbValue)
11112 {
11113 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11114 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11115 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11116 default:
11117 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11118 }
11119 break;
11120
11121 case IEMMODE_32BIT:
11122 switch (cbValue)
11123 {
11124 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11125 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11126 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11127 default:
11128 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11129 }
11130 break;
11131
11132 case IEMMODE_64BIT:
11133 switch (cbValue)
11134 {
11135 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11136 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11137 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11138 default:
11139 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11140 }
11141 break;
11142
11143 default:
11144 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11145 }
11146 }
11147
11148 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11149}
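/*
 * Illustrative sketch, not part of the original source: the shape of a caller
 * (e.g. an HM I/O-exit handler) once it has decoded a REP OUTSB with 16-bit
 * addressing.  The instruction length and segment below are example
 * assumptions, and the I/O port permission check is presumed done already, as
 * required above.  IEMExecStringIoRead is used the same way, minus iEffSeg.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateRepOutsb(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
                                                 1 /* cbValue: byte access */,
                                                 IEMMODE_16BIT,
                                                 true /* fRepPrefix */,
                                                 2 /* cbInstr: example length */,
                                                 X86_SREG_DS);
    /* Informational statuses (pending I/O, raised exceptions, ...) must be
       propagated to the caller rather than swallowed. */
    return rcStrict;
}
#endif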
11150
11151
11152/**
11153 * Interface for HM and EM for executing string I/O IN (read) instructions.
11154 *
11155 * This API ASSUMES that the caller has already verified that the guest code is
11156 * allowed to access the I/O port. (The I/O port is in the DX register in the
11157 * guest state.)
11158 *
11159 * @returns Strict VBox status code.
11160 * @param pVCpu The cross context per virtual CPU structure.
11161 * @param cbValue The size of the I/O port access (1, 2, or 4).
11162 * @param enmAddrMode The addressing mode.
11163 * @param fRepPrefix Indicates whether a repeat prefix is used
11164 * (doesn't matter which for this instruction).
11165 * @param cbInstr The instruction length in bytes.
11166 */
11167VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11168 bool fRepPrefix, uint8_t cbInstr)
11169{
11170 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11171
11172 /*
11173 * State init.
11174 */
11175 PIEMCPU pIemCpu = &pVCpu->iem.s;
11176 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11177
11178 /*
11179 * Switch orgy for getting to the right handler.
11180 */
11181 VBOXSTRICTRC rcStrict;
11182 if (fRepPrefix)
11183 {
11184 switch (enmAddrMode)
11185 {
11186 case IEMMODE_16BIT:
11187 switch (cbValue)
11188 {
11189 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11190 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11191 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11192 default:
11193 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11194 }
11195 break;
11196
11197 case IEMMODE_32BIT:
11198 switch (cbValue)
11199 {
11200 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11201 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11202 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11203 default:
11204 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11205 }
11206 break;
11207
11208 case IEMMODE_64BIT:
11209 switch (cbValue)
11210 {
11211 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11212 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11213 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11214 default:
11215 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11216 }
11217 break;
11218
11219 default:
11220 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11221 }
11222 }
11223 else
11224 {
11225 switch (enmAddrMode)
11226 {
11227 case IEMMODE_16BIT:
11228 switch (cbValue)
11229 {
11230 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11231 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11232 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11233 default:
11234 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11235 }
11236 break;
11237
11238 case IEMMODE_32BIT:
11239 switch (cbValue)
11240 {
11241 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11242 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11243 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11244 default:
11245 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11246 }
11247 break;
11248
11249 case IEMMODE_64BIT:
11250 switch (cbValue)
11251 {
11252 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11253 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11254 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11255 default:
11256 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11257 }
11258 break;
11259
11260 default:
11261 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11262 }
11263 }
11264
11265 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11266}
11267
11268
11269
11270/**
11271 * Interface for HM and EM to write to a CRx register.
11272 *
11273 * @returns Strict VBox status code.
11274 * @param pVCpu The cross context per virtual CPU structure.
11275 * @param cbInstr The instruction length in bytes.
11276 * @param iCrReg The control register number (destination).
11277 * @param iGReg The general purpose register number (source).
11278 *
11279 * @remarks In ring-0 not all of the state needs to be synced in.
11280 */
11281VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11282{
11283 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11284 Assert(iCrReg < 16);
11285 Assert(iGReg < 16);
11286
11287 PIEMCPU pIemCpu = &pVCpu->iem.s;
11288 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11289 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11290 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11291}
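/*
 * Illustrative sketch, not part of the original source: a CR-access intercept
 * handler could forward an already decoded "mov cr3, rax" like this.  The
 * 3-byte length (0F 22 /r) and the register indices are example assumptions.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /* cbInstr */, 3 /* iCrReg: CR3 */, 0 /* iGReg: rAX */);
}
#endif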
11292
11293
11294/**
11295 * Interface for HM and EM to read from a CRx register.
11296 *
11297 * @returns Strict VBox status code.
11298 * @param pVCpu The cross context per virtual CPU structure.
11299 * @param cbInstr The instruction length in bytes.
11300 * @param iGReg The general purpose register number (destination).
11301 * @param iCrReg The control register number (source).
11302 *
11303 * @remarks In ring-0 not all of the state needs to be synced in.
11304 */
11305VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11306{
11307 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11308 Assert(iCrReg < 16);
11309 Assert(iGReg < 16);
11310
11311 PIEMCPU pIemCpu = &pVCpu->iem.s;
11312 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11313 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11314 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11315}
11316
11317
11318/**
11319 * Interface for HM and EM to clear the CR0[TS] bit.
11320 *
11321 * @returns Strict VBox status code.
11322 * @param pVCpu The cross context per virtual CPU structure.
11323 * @param cbInstr The instruction length in bytes.
11324 *
11325 * @remarks In ring-0 not all of the state needs to be synced in.
11326 */
11327VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11328{
11329 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11330
11331 PIEMCPU pIemCpu = &pVCpu->iem.s;
11332 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11333 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11334 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11335}
11336
11337
11338/**
11339 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11340 *
11341 * @returns Strict VBox status code.
11342 * @param pVCpu The cross context per virtual CPU structure.
11343 * @param cbInstr The instruction length in bytes.
11344 * @param uValue The value to load into CR0.
11345 *
11346 * @remarks In ring-0 not all of the state needs to be synced in.
11347 */
11348VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11349{
11350 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11351
11352 PIEMCPU pIemCpu = &pVCpu->iem.s;
11353 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11354 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11355 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11356}
11357
11358
11359/**
11360 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11361 *
11362 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11363 *
11364 * @returns Strict VBox status code.
11365 * @param pVCpu The cross context per virtual CPU structure of the
11366 * calling EMT.
11367 * @param cbInstr The instruction length in bytes.
11368 * @remarks In ring-0 not all of the state needs to be synced in.
11369 * @threads EMT(pVCpu)
11370 */
11371VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11372{
11373 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11374
11375 PIEMCPU pIemCpu = &pVCpu->iem.s;
11376 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11377 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11378 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11379}
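/*
 * Illustrative sketch, not part of the original source: since iemCImpl_xsetbv
 * reads the XCR index and value from the guest's ecx and edx:eax, a ring-0
 * caller must have those registers synced into the CPU context before making
 * the call.  The 3-byte length matches the 0F 01 D1 encoding of XSETBV.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateXsetbv(PVMCPU pVCpu)
{
    /* ...make sure ecx, edx and eax are up to date in the guest CPUMCTX... */
    return IEMExecDecodedXsetbv(pVCpu, 3 /* cbInstr */);
}
#endif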
11380