VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@55289

Last change on this file since 55289 was 55289, checked in by vboxsync, 10 years ago

IEM,CPUM: Implemented XSETBV and XGETBV.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 426.9 KB
1/* $Id: IEMAll.cpp 55289 2015-04-15 15:02:57Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
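
/*
 * Illustrative sketch (not part of the original file): roughly how the logging
 * levels listed above show up in practice.  The messages below are made up for
 * illustration only.
 */
#if 0 /* example only, never compiled */
LogFlow(("IEMExecOne: enter\n"));              /* Flow    - basic enter/exit state info.        */
Log(("iemRaiseXcptOrInt: raising #GP(0)\n"));  /* Level 1 - exceptions and other major events.  */
Log3(("IEMExecOne: done\n"));                  /* Level 3 - more detailed enter/exit state info. */
Log4(("decode - nop\n"));                      /* Level 4 - decoded mnemonics w/ EIP.           */
Log5(("ModRM byte fetched\n"));                /* Level 5 - decoding details.                   */
#endif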
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78#define IEM_IMPLEMENTS_TASKSWITCH
79
80/*******************************************************************************
81* Header Files *
82*******************************************************************************/
83#define LOG_GROUP LOG_GROUP_IEM
84#include <VBox/vmm/iem.h>
85#include <VBox/vmm/cpum.h>
86#include <VBox/vmm/pdm.h>
87#include <VBox/vmm/pgm.h>
88#include <internal/pgm.h>
89#include <VBox/vmm/iom.h>
90#include <VBox/vmm/em.h>
91#include <VBox/vmm/hm.h>
92#include <VBox/vmm/tm.h>
93#include <VBox/vmm/dbgf.h>
94#include <VBox/vmm/dbgftrace.h>
95#ifdef VBOX_WITH_RAW_MODE_NOT_R0
96# include <VBox/vmm/patm.h>
97# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
98# include <VBox/vmm/csam.h>
99# endif
100#endif
101#include "IEMInternal.h"
102#ifdef IEM_VERIFICATION_MODE_FULL
103# include <VBox/vmm/rem.h>
104# include <VBox/vmm/mm.h>
105#endif
106#include <VBox/vmm/vm.h>
107#include <VBox/log.h>
108#include <VBox/err.h>
109#include <VBox/param.h>
110#include <VBox/dis.h>
111#include <VBox/disopcode.h>
112#include <iprt/assert.h>
113#include <iprt/string.h>
114#include <iprt/x86.h>
115
116
117
118/*******************************************************************************
119* Structures and Typedefs *
120*******************************************************************************/
121/** @typedef PFNIEMOP
122 * Pointer to an opcode decoder function.
123 */
124
125/** @def FNIEMOP_DEF
126 * Define an opcode decoder function.
127 *
128 * We're using macros for this so that adding and removing parameters as well as
129 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
130 *
131 * @param a_Name The function name.
132 */
133
134
135#if defined(__GNUC__) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
143
144#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
145typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
152
153#elif defined(__GNUC__)
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#else
163typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
164# define FNIEMOP_DEF(a_Name) \
165 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
170
171#endif
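
/*
 * Illustrative sketch (not part of the original file): what an opcode decoder
 * function defined with the FNIEMOP_DEF macro above looks like.  The function
 * name is hypothetical and the body is a stub.
 */
#if 0 /* example only, never compiled */
FNIEMOP_DEF(iemOp_example_stub)
{
    /* A decoder receives the per-CPU IEM state as pIemCpu and returns a strict status code. */
    NOREF(pIemCpu);
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}
#endif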
172
173
174/**
175 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
176 */
177typedef union IEMSELDESC
178{
179 /** The legacy view. */
180 X86DESC Legacy;
181 /** The long mode view. */
182 X86DESC64 Long;
183} IEMSELDESC;
184/** Pointer to a selector descriptor table entry. */
185typedef IEMSELDESC *PIEMSELDESC;
186
187
188/*******************************************************************************
189* Defined Constants And Macros *
190*******************************************************************************/
191/** Temporary hack to disable the double execution. Will be removed in favor
192 * of a dedicated execution mode in EM. */
193//#define IEM_VERIFICATION_MODE_NO_REM
194
195/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
196 * due to GCC lacking knowledge about the value range of a switch. */
197#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
198
199/**
200 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
201 * occasion.
202 */
203#ifdef LOG_ENABLED
204# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
205 do { \
206 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
207 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
208 } while (0)
209#else
210# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
212#endif
213
214/**
215 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
216 * occasion using the supplied logger statement.
217 *
218 * @param a_LoggerArgs What to log on failure.
219 */
220#ifdef LOG_ENABLED
221# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
222 do { \
223 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
224 /*LogFunc(a_LoggerArgs);*/ \
225 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
226 } while (0)
227#else
228# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
230#endif
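
/*
 * Illustrative sketch (hypothetical function and condition): typical use of the
 * bail-out macros above when hitting an unimplemented corner case.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC iemExampleUnimplemented(PIEMCPU pIemCpu, uint8_t bRm)
{
    NOREF(pIemCpu);
    if (bRm & X86_MODRM_MOD_MASK)   /* made-up condition */
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("bRm=%#x: not handled yet\n", bRm));
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
}
#endif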
231
232/**
233 * Call an opcode decoder function.
234 *
235 * We're using macros for this so that adding and removing parameters can be
236 * done as we please. See FNIEMOP_DEF.
237 */
238#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
239
240/**
241 * Call a common opcode decoder function taking one extra argument.
242 *
243 * We're using macros for this so that adding and removing parameters can be
244 * done as we please. See FNIEMOP_DEF_1.
245 */
246#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
247
248/**
249 * Call a common opcode decoder function taking two extra arguments.
250 *
251 * We're using macros for this so that adding and removing parameters can be
252 * done as we please. See FNIEMOP_DEF_2.
253 */
254#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
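
/*
 * Illustrative sketch (hypothetical names): a decoder deferring to a common
 * worker through the call macros above.
 */
#if 0 /* example only, never compiled */
FNIEMOP_DEF_1(iemOpCommonExampleWorker, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOp_example_dispatch)
{
    uint8_t const bRm = 0xc0; /* would normally come from the opcode stream */
    return FNIEMOP_CALL_1(iemOpCommonExampleWorker, bRm);
}
#endif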
255
256/**
257 * Check if we're currently executing in real or virtual 8086 mode.
258 *
259 * @returns @c true if it is, @c false if not.
260 * @param a_pIemCpu The IEM state of the current CPU.
261 */
262#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
263
264/**
265 * Check if we're currently executing in virtual 8086 mode.
266 *
267 * @returns @c true if it is, @c false if not.
268 * @param a_pIemCpu The IEM state of the current CPU.
269 */
270#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
271
272/**
273 * Check if we're currently executing in long mode.
274 *
275 * @returns @c true if it is, @c false if not.
276 * @param a_pIemCpu The IEM state of the current CPU.
277 */
278#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
279
280/**
281 * Check if we're currently executing in real mode.
282 *
283 * @returns @c true if it is, @c false if not.
284 * @param a_pIemCpu The IEM state of the current CPU.
285 */
286#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
287
288/**
289 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
290 * @returns PCCPUMFEATURES
291 * @param a_pIemCpu The IEM state of the current CPU.
292 */
293#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
294
295/**
296 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
297 * @returns PCCPUMFEATURES
298 * @param a_pIemCpu The IEM state of the current CPU.
299 */
300#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
301
302/**
303 * Evaluates to true if we're presenting an Intel CPU to the guest.
304 */
305#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
306
307/**
308 * Evaluates to true if we're presenting an AMD CPU to the guest.
309 */
310#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
311
312/**
313 * Check if the address is canonical.
314 */
315#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
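
/*
 * Illustrative sketch (hypothetical helper): how the mode and canonicity checks
 * above are typically combined.  This is a simplification that ignores big real
 * mode and segment limits.
 */
#if 0 /* example only, never compiled */
static bool iemExampleIsAddressPlausible(PIEMCPU pIemCpu, uint64_t uAddr)
{
    if (IEM_IS_LONG_MODE(pIemCpu))
        return IEM_IS_CANONICAL(uAddr);   /* 64-bit mode: the address must be canonical. */
    if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
        return uAddr <= UINT16_MAX;       /* real/V86 mode: 16-bit effective addresses. */
    return uAddr <= UINT32_MAX;           /* protected mode: at most 32-bit addresses. */
}
#endif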
316
317
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
322
323
324/** Function table for the ADD instruction. */
325static const IEMOPBINSIZES g_iemAImpl_add =
326{
327 iemAImpl_add_u8, iemAImpl_add_u8_locked,
328 iemAImpl_add_u16, iemAImpl_add_u16_locked,
329 iemAImpl_add_u32, iemAImpl_add_u32_locked,
330 iemAImpl_add_u64, iemAImpl_add_u64_locked
331};
332
333/** Function table for the ADC instruction. */
334static const IEMOPBINSIZES g_iemAImpl_adc =
335{
336 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
337 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
338 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
339 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
340};
341
342/** Function table for the SUB instruction. */
343static const IEMOPBINSIZES g_iemAImpl_sub =
344{
345 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
346 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
347 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
348 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
349};
350
351/** Function table for the SBB instruction. */
352static const IEMOPBINSIZES g_iemAImpl_sbb =
353{
354 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
355 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
356 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
357 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
358};
359
360/** Function table for the OR instruction. */
361static const IEMOPBINSIZES g_iemAImpl_or =
362{
363 iemAImpl_or_u8, iemAImpl_or_u8_locked,
364 iemAImpl_or_u16, iemAImpl_or_u16_locked,
365 iemAImpl_or_u32, iemAImpl_or_u32_locked,
366 iemAImpl_or_u64, iemAImpl_or_u64_locked
367};
368
369/** Function table for the XOR instruction. */
370static const IEMOPBINSIZES g_iemAImpl_xor =
371{
372 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
373 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
374 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
375 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
376};
377
378/** Function table for the AND instruction. */
379static const IEMOPBINSIZES g_iemAImpl_and =
380{
381 iemAImpl_and_u8, iemAImpl_and_u8_locked,
382 iemAImpl_and_u16, iemAImpl_and_u16_locked,
383 iemAImpl_and_u32, iemAImpl_and_u32_locked,
384 iemAImpl_and_u64, iemAImpl_and_u64_locked
385};
386
387/** Function table for the CMP instruction.
388 * @remarks Making operand order ASSUMPTIONS.
389 */
390static const IEMOPBINSIZES g_iemAImpl_cmp =
391{
392 iemAImpl_cmp_u8, NULL,
393 iemAImpl_cmp_u16, NULL,
394 iemAImpl_cmp_u32, NULL,
395 iemAImpl_cmp_u64, NULL
396};
397
398/** Function table for the TEST instruction.
399 * @remarks Making operand order ASSUMPTIONS.
400 */
401static const IEMOPBINSIZES g_iemAImpl_test =
402{
403 iemAImpl_test_u8, NULL,
404 iemAImpl_test_u16, NULL,
405 iemAImpl_test_u32, NULL,
406 iemAImpl_test_u64, NULL
407};
408
409/** Function table for the BT instruction. */
410static const IEMOPBINSIZES g_iemAImpl_bt =
411{
412 NULL, NULL,
413 iemAImpl_bt_u16, NULL,
414 iemAImpl_bt_u32, NULL,
415 iemAImpl_bt_u64, NULL
416};
417
418/** Function table for the BTC instruction. */
419static const IEMOPBINSIZES g_iemAImpl_btc =
420{
421 NULL, NULL,
422 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
423 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
424 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
425};
426
427/** Function table for the BTR instruction. */
428static const IEMOPBINSIZES g_iemAImpl_btr =
429{
430 NULL, NULL,
431 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
432 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
433 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
434};
435
436/** Function table for the BTS instruction. */
437static const IEMOPBINSIZES g_iemAImpl_bts =
438{
439 NULL, NULL,
440 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
441 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
442 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
443};
444
445/** Function table for the BSF instruction. */
446static const IEMOPBINSIZES g_iemAImpl_bsf =
447{
448 NULL, NULL,
449 iemAImpl_bsf_u16, NULL,
450 iemAImpl_bsf_u32, NULL,
451 iemAImpl_bsf_u64, NULL
452};
453
454/** Function table for the BSR instruction. */
455static const IEMOPBINSIZES g_iemAImpl_bsr =
456{
457 NULL, NULL,
458 iemAImpl_bsr_u16, NULL,
459 iemAImpl_bsr_u32, NULL,
460 iemAImpl_bsr_u64, NULL
461};
462
463/** Function table for the two-operand IMUL instruction. */
464static const IEMOPBINSIZES g_iemAImpl_imul_two =
465{
466 NULL, NULL,
467 iemAImpl_imul_two_u16, NULL,
468 iemAImpl_imul_two_u32, NULL,
469 iemAImpl_imul_two_u64, NULL
470};
471
472/** Group 1 /r lookup table. */
473static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
474{
475 &g_iemAImpl_add,
476 &g_iemAImpl_or,
477 &g_iemAImpl_adc,
478 &g_iemAImpl_sbb,
479 &g_iemAImpl_and,
480 &g_iemAImpl_sub,
481 &g_iemAImpl_xor,
482 &g_iemAImpl_cmp
483};
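
/*
 * Illustrative sketch: how the group 1 table above is indexed.  The reg field
 * (bits 5:3) of the ModR/M byte selects ADD/OR/ADC/SBB/AND/SUB/XOR/CMP, and the
 * effective operand size then picks the u8/u16/u32/u64 (or locked) worker from
 * the chosen IEMOPBINSIZES entry.  The ModR/M value below is made up.
 */
#if 0 /* example only, never compiled */
uint8_t const         bRm   = 5 << X86_MODRM_REG_SHIFT;                          /* /5 = SUB           */
PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & 7]; /* -> &g_iemAImpl_sub */
#endif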
484
485/** Function table for the INC instruction. */
486static const IEMOPUNARYSIZES g_iemAImpl_inc =
487{
488 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
489 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
490 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
491 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
492};
493
494/** Function table for the DEC instruction. */
495static const IEMOPUNARYSIZES g_iemAImpl_dec =
496{
497 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
498 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
499 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
500 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
501};
502
503/** Function table for the NEG instruction. */
504static const IEMOPUNARYSIZES g_iemAImpl_neg =
505{
506 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
507 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
508 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
509 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
510};
511
512/** Function table for the NOT instruction. */
513static const IEMOPUNARYSIZES g_iemAImpl_not =
514{
515 iemAImpl_not_u8, iemAImpl_not_u8_locked,
516 iemAImpl_not_u16, iemAImpl_not_u16_locked,
517 iemAImpl_not_u32, iemAImpl_not_u32_locked,
518 iemAImpl_not_u64, iemAImpl_not_u64_locked
519};
520
521
522/** Function table for the ROL instruction. */
523static const IEMOPSHIFTSIZES g_iemAImpl_rol =
524{
525 iemAImpl_rol_u8,
526 iemAImpl_rol_u16,
527 iemAImpl_rol_u32,
528 iemAImpl_rol_u64
529};
530
531/** Function table for the ROR instruction. */
532static const IEMOPSHIFTSIZES g_iemAImpl_ror =
533{
534 iemAImpl_ror_u8,
535 iemAImpl_ror_u16,
536 iemAImpl_ror_u32,
537 iemAImpl_ror_u64
538};
539
540/** Function table for the RCL instruction. */
541static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
542{
543 iemAImpl_rcl_u8,
544 iemAImpl_rcl_u16,
545 iemAImpl_rcl_u32,
546 iemAImpl_rcl_u64
547};
548
549/** Function table for the RCR instruction. */
550static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
551{
552 iemAImpl_rcr_u8,
553 iemAImpl_rcr_u16,
554 iemAImpl_rcr_u32,
555 iemAImpl_rcr_u64
556};
557
558/** Function table for the SHL instruction. */
559static const IEMOPSHIFTSIZES g_iemAImpl_shl =
560{
561 iemAImpl_shl_u8,
562 iemAImpl_shl_u16,
563 iemAImpl_shl_u32,
564 iemAImpl_shl_u64
565};
566
567/** Function table for the SHR instruction. */
568static const IEMOPSHIFTSIZES g_iemAImpl_shr =
569{
570 iemAImpl_shr_u8,
571 iemAImpl_shr_u16,
572 iemAImpl_shr_u32,
573 iemAImpl_shr_u64
574};
575
576/** Function table for the SAR instruction. */
577static const IEMOPSHIFTSIZES g_iemAImpl_sar =
578{
579 iemAImpl_sar_u8,
580 iemAImpl_sar_u16,
581 iemAImpl_sar_u32,
582 iemAImpl_sar_u64
583};
584
585
586/** Function table for the MUL instruction. */
587static const IEMOPMULDIVSIZES g_iemAImpl_mul =
588{
589 iemAImpl_mul_u8,
590 iemAImpl_mul_u16,
591 iemAImpl_mul_u32,
592 iemAImpl_mul_u64
593};
594
595/** Function table for the IMUL instruction working implicitly on rAX. */
596static const IEMOPMULDIVSIZES g_iemAImpl_imul =
597{
598 iemAImpl_imul_u8,
599 iemAImpl_imul_u16,
600 iemAImpl_imul_u32,
601 iemAImpl_imul_u64
602};
603
604/** Function table for the DIV instruction. */
605static const IEMOPMULDIVSIZES g_iemAImpl_div =
606{
607 iemAImpl_div_u8,
608 iemAImpl_div_u16,
609 iemAImpl_div_u32,
610 iemAImpl_div_u64
611};
612
613/** Function table for the IDIV instruction. */
614static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
615{
616 iemAImpl_idiv_u8,
617 iemAImpl_idiv_u16,
618 iemAImpl_idiv_u32,
619 iemAImpl_idiv_u64
620};
621
622/** Function table for the SHLD instruction */
623static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
624{
625 iemAImpl_shld_u16,
626 iemAImpl_shld_u32,
627 iemAImpl_shld_u64,
628};
629
630/** Function table for the SHRD instruction */
631static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
632{
633 iemAImpl_shrd_u16,
634 iemAImpl_shrd_u32,
635 iemAImpl_shrd_u64,
636};
637
638
639/** Function table for the PUNPCKLBW instruction */
640static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
641/** Function table for the PUNPCKLWD instruction */
642static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
643/** Function table for the PUNPCKLDQ instruction */
644static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
645/** Function table for the PUNPCKLQDQ instruction */
646static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
647
648/** Function table for the PUNPCKHBW instruction */
649static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
650/** Function table for the PUNPCKHWD instruction */
651static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
652/** Function table for the PUNPCKHDQ instruction */
653static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
654/** Function table for the PUNPCKHQDQ instruction */
655static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
656
657/** Function table for the PXOR instruction */
658static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
659/** Function table for the PCMPEQB instruction */
660static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
661/** Function table for the PCMPEQW instruction */
662static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
663/** Function table for the PCMPEQD instruction */
664static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
665
666
667#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
668/** What IEM just wrote. */
669uint8_t g_abIemWrote[256];
670/** How much IEM just wrote. */
671size_t g_cbIemWrote;
672#endif
673
674
675/*******************************************************************************
676* Internal Functions *
677*******************************************************************************/
678static VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
679static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
680static VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
681static VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
682/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
683static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
684static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
685static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686static VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
687static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
688static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
689static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
690static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
691static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
692static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
693static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
694static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
695static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
696static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
697static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
698static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
699static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
700static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
701static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
704static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
705static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
706static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
707static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
708static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
709static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
710static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
711
712#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
713static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
714#endif
715static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
716static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
717
718
719
720/**
721 * Sets the pass up status.
722 *
723 * @returns VINF_SUCCESS.
724 * @param pIemCpu The per CPU IEM state of the calling thread.
725 * @param rcPassUp The pass up status. Must be informational.
726 * VINF_SUCCESS is not allowed.
727 */
728static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
729{
730 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
731
732 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
733 if (rcOldPassUp == VINF_SUCCESS)
734 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
735 /* If both are EM scheduling codes, use EM priority rules. */
736 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
737 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
738 {
739 if (rcPassUp < rcOldPassUp)
740 {
741 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
742 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
743 }
744 else
745 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 }
747 /* Override EM scheduling with specific status code. */
748 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
749 {
750 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
751 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
752 }
753 /* Don't override specific status code, first come first served. */
754 else
755 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
756 return VINF_SUCCESS;
757}
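
/*
 * Illustrative sketch of the merging rules implemented above: EM scheduling
 * codes compete by priority (lower value wins), while a specific informational
 * status is kept on a first-come-first-served basis.  The sequence assumes
 * VINF_EM_HALT has a lower (higher priority) value than VINF_EM_RESCHEDULE,
 * per EM's ordering of its status codes.
 */
#if 0 /* example only, never compiled */
iemSetPassUpStatus(pIemCpu, VINF_EM_RESCHEDULE);  /* recorded, rcPassUp was VINF_SUCCESS            */
iemSetPassUpStatus(pIemCpu, VINF_EM_HALT);        /* overrides the reschedule, higher EM priority   */
#endif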
758
759
760/**
761 * Initializes the execution state.
762 *
763 * @param pIemCpu The per CPU IEM state.
764 * @param fBypassHandlers Whether to bypass access handlers.
765 */
766DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
767{
768 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
769 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
770
771#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
772 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
773 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
774 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
776 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
778 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
780#endif
781
782#ifdef VBOX_WITH_RAW_MODE_NOT_R0
783 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
784#endif
785 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
786 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
787 ? IEMMODE_64BIT
788 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
789 ? IEMMODE_32BIT
790 : IEMMODE_16BIT;
791 pIemCpu->enmCpuMode = enmMode;
792#ifdef VBOX_STRICT
793 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
794 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
795 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
796 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
797 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
798 pIemCpu->uRexReg = 127;
799 pIemCpu->uRexB = 127;
800 pIemCpu->uRexIndex = 127;
801 pIemCpu->iEffSeg = 127;
802 pIemCpu->offOpcode = 127;
803 pIemCpu->cbOpcode = 127;
804#endif
805
806 pIemCpu->cActiveMappings = 0;
807 pIemCpu->iNextMapping = 0;
808 pIemCpu->rcPassUp = VINF_SUCCESS;
809 pIemCpu->fBypassHandlers = fBypassHandlers;
810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
811 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
812 && pCtx->cs.u64Base == 0
813 && pCtx->cs.u32Limit == UINT32_MAX
814 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
815 if (!pIemCpu->fInPatchCode)
816 CPUMRawLeave(pVCpu, VINF_SUCCESS);
817#endif
818}
819
820
821/**
822 * Initializes the decoder state.
823 *
824 * @param pIemCpu The per CPU IEM state.
825 * @param fBypassHandlers Whether to bypass access handlers.
826 */
827DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
828{
829 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
830 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
831
832#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
833 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
834 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
835 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
836 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
837 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
839 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
840 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
841#endif
842
843#ifdef VBOX_WITH_RAW_MODE_NOT_R0
844 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
845#endif
846 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
847#ifdef IEM_VERIFICATION_MODE_FULL
848 if (pIemCpu->uInjectCpl != UINT8_MAX)
849 pIemCpu->uCpl = pIemCpu->uInjectCpl;
850#endif
851 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
852 ? IEMMODE_64BIT
853 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
854 ? IEMMODE_32BIT
855 : IEMMODE_16BIT;
856 pIemCpu->enmCpuMode = enmMode;
857 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
858 pIemCpu->enmEffAddrMode = enmMode;
859 if (enmMode != IEMMODE_64BIT)
860 {
861 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
862 pIemCpu->enmEffOpSize = enmMode;
863 }
864 else
865 {
866 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
867 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
868 }
869 pIemCpu->fPrefixes = 0;
870 pIemCpu->uRexReg = 0;
871 pIemCpu->uRexB = 0;
872 pIemCpu->uRexIndex = 0;
873 pIemCpu->iEffSeg = X86_SREG_DS;
874 pIemCpu->offOpcode = 0;
875 pIemCpu->cbOpcode = 0;
876 pIemCpu->cActiveMappings = 0;
877 pIemCpu->iNextMapping = 0;
878 pIemCpu->rcPassUp = VINF_SUCCESS;
879 pIemCpu->fBypassHandlers = fBypassHandlers;
880#ifdef VBOX_WITH_RAW_MODE_NOT_R0
881 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
882 && pCtx->cs.u64Base == 0
883 && pCtx->cs.u32Limit == UINT32_MAX
884 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
885 if (!pIemCpu->fInPatchCode)
886 CPUMRawLeave(pVCpu, VINF_SUCCESS);
887#endif
888
889#ifdef DBGFTRACE_ENABLED
890 switch (enmMode)
891 {
892 case IEMMODE_64BIT:
893 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
894 break;
895 case IEMMODE_32BIT:
896 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
897 break;
898 case IEMMODE_16BIT:
899 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
900 break;
901 }
902#endif
903}
904
905
906/**
907 * Prefetches the initial opcode bytes when starting execution.
908 *
909 * @returns Strict VBox status code.
910 * @param pIemCpu The IEM state.
911 * @param fBypassHandlers Whether to bypass access handlers.
912 */
913static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
914{
915#ifdef IEM_VERIFICATION_MODE_FULL
916 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
917#endif
918 iemInitDecoder(pIemCpu, fBypassHandlers);
919
920 /*
921 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
922 *
923 * First translate CS:rIP to a physical address.
924 */
925 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
926 uint32_t cbToTryRead;
927 RTGCPTR GCPtrPC;
928 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
929 {
930 cbToTryRead = PAGE_SIZE;
931 GCPtrPC = pCtx->rip;
932 if (!IEM_IS_CANONICAL(GCPtrPC))
933 return iemRaiseGeneralProtectionFault0(pIemCpu);
934 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
935 }
936 else
937 {
938 uint32_t GCPtrPC32 = pCtx->eip;
939 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
940 if (GCPtrPC32 > pCtx->cs.u32Limit)
941 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
942 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
943 if (!cbToTryRead) /* overflowed */
944 {
945 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
946 cbToTryRead = UINT32_MAX;
947 }
948 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
949 Assert(GCPtrPC <= UINT32_MAX);
950 }
951
952#ifdef VBOX_WITH_RAW_MODE_NOT_R0
953 /* Allow interpretation of patch manager code blocks since they can for
954 instance throw #PFs for perfectly good reasons. */
955 if (pIemCpu->fInPatchCode)
956 {
957 size_t cbRead = 0;
958 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
959 AssertRCReturn(rc, rc);
960 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
961 return VINF_SUCCESS;
962 }
963#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
964
965 RTGCPHYS GCPhys;
966 uint64_t fFlags;
967 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
968 if (RT_FAILURE(rc))
969 {
970 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
971 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
972 }
973 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
974 {
975 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
976 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
977 }
978 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
979 {
980 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
981 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
982 }
983 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
984 /** @todo Check reserved bits and such stuff. PGM is better at doing
985 * that, so do it when implementing the guest virtual address
986 * TLB... */
987
988#ifdef IEM_VERIFICATION_MODE_FULL
989 /*
990 * Optimistic optimization: Use unconsumed opcode bytes from the previous
991 * instruction.
992 */
993 /** @todo optimize this differently by not using PGMPhysRead. */
994 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
995 pIemCpu->GCPhysOpcodes = GCPhys;
996 if ( offPrevOpcodes < cbOldOpcodes
997 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
998 {
999 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1000 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1001 pIemCpu->cbOpcode = cbNew;
1002 return VINF_SUCCESS;
1003 }
1004#endif
1005
1006 /*
1007 * Read the bytes at this address.
1008 */
1009 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1010#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1011 size_t cbActual;
1012 if ( PATMIsEnabled(pVM)
1013 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1014 {
1015 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1016 Assert(cbActual > 0);
1017 pIemCpu->cbOpcode = (uint8_t)cbActual;
1018 }
1019 else
1020#endif
1021 {
1022 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1023 if (cbToTryRead > cbLeftOnPage)
1024 cbToTryRead = cbLeftOnPage;
1025 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1026 cbToTryRead = sizeof(pIemCpu->abOpcode);
1027
1028 if (!pIemCpu->fBypassHandlers)
1029 rc = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead);
1030 else
1031 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1032 if (rc != VINF_SUCCESS)
1033 {
1034 /** @todo status code handling */
1035 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1036 GCPtrPC, GCPhys, rc, cbToTryRead));
1037 return rc;
1038 }
1039 pIemCpu->cbOpcode = cbToTryRead;
1040 }
1041
1042 return VINF_SUCCESS;
1043}
1044
1045
1046/**
1047 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1048 * exception if it fails.
1049 *
1050 * @returns Strict VBox status code.
1051 * @param pIemCpu The IEM state.
1052 * @param cbMin The minimum number of bytes relative to offOpcode
1053 * that must be read.
1054 */
1055static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1056{
1057 /*
1058 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1059 *
1060 * First translate CS:rIP to a physical address.
1061 */
1062 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1063 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1064 uint32_t cbToTryRead;
1065 RTGCPTR GCPtrNext;
1066 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1067 {
1068 cbToTryRead = PAGE_SIZE;
1069 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1070 if (!IEM_IS_CANONICAL(GCPtrNext))
1071 return iemRaiseGeneralProtectionFault0(pIemCpu);
1072 }
1073 else
1074 {
1075 uint32_t GCPtrNext32 = pCtx->eip;
1076 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1077 GCPtrNext32 += pIemCpu->cbOpcode;
1078 if (GCPtrNext32 > pCtx->cs.u32Limit)
1079 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1080 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1081 if (!cbToTryRead) /* overflowed */
1082 {
1083 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1084 cbToTryRead = UINT32_MAX;
1085 /** @todo check out wrapping around the code segment. */
1086 }
1087 if (cbToTryRead < cbMin - cbLeft)
1088 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1089 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1090 }
1091
1092 /* Only read up to the end of the page, and make sure we don't read more
1093 than the opcode buffer can hold. */
1094 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1095 if (cbToTryRead > cbLeftOnPage)
1096 cbToTryRead = cbLeftOnPage;
1097 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1098 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1099/** @todo r=bird: Convert assertion into undefined opcode exception? */
1100 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1101
1102#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1103 /* Allow interpretation of patch manager code blocks since they can for
1104 instance throw #PFs for perfectly good reasons. */
1105 if (pIemCpu->fInPatchCode)
1106 {
1107 size_t cbRead = 0;
1108 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1109 AssertRCReturn(rc, rc);
1110 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1111 return VINF_SUCCESS;
1112 }
1113#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1114
1115 RTGCPHYS GCPhys;
1116 uint64_t fFlags;
1117 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1118 if (RT_FAILURE(rc))
1119 {
1120 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1121 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1122 }
1123 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1124 {
1125 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1126 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1127 }
1128 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1129 {
1130 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1131 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1132 }
1133 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1134 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1135 /** @todo Check reserved bits and such stuff. PGM is better at doing
1136 * that, so do it when implementing the guest virtual address
1137 * TLB... */
1138
1139 /*
1140 * Read the bytes at this address.
1141 *
1142 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1143 * and since PATM should only patch the start of an instruction there
1144 * should be no need to check again here.
1145 */
1146 if (!pIemCpu->fBypassHandlers)
1147 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
1148 else
1149 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1150 if (rc != VINF_SUCCESS)
1151 {
1152 /** @todo status code handling */
1153 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1154 return rc;
1155 }
1156 pIemCpu->cbOpcode += cbToTryRead;
1157 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1158
1159 return VINF_SUCCESS;
1160}
1161
1162
1163/**
1164 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1165 *
1166 * @returns Strict VBox status code.
1167 * @param pIemCpu The IEM state.
1168 * @param pb Where to return the opcode byte.
1169 */
1170DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1171{
1172 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1173 if (rcStrict == VINF_SUCCESS)
1174 {
1175 uint8_t offOpcode = pIemCpu->offOpcode;
1176 *pb = pIemCpu->abOpcode[offOpcode];
1177 pIemCpu->offOpcode = offOpcode + 1;
1178 }
1179 else
1180 *pb = 0;
1181 return rcStrict;
1182}
1183
1184
1185/**
1186 * Fetches the next opcode byte.
1187 *
1188 * @returns Strict VBox status code.
1189 * @param pIemCpu The IEM state.
1190 * @param pu8 Where to return the opcode byte.
1191 */
1192DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1193{
1194 uint8_t const offOpcode = pIemCpu->offOpcode;
1195 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1196 {
1197 *pu8 = pIemCpu->abOpcode[offOpcode];
1198 pIemCpu->offOpcode = offOpcode + 1;
1199 return VINF_SUCCESS;
1200 }
1201 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1202}
1203
1204
1205/**
1206 * Fetches the next opcode byte, returns automatically on failure.
1207 *
1208 * @param a_pu8 Where to return the opcode byte.
1209 * @remark Implicitly references pIemCpu.
1210 */
1211#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1212 do \
1213 { \
1214 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1215 if (rcStrict2 != VINF_SUCCESS) \
1216 return rcStrict2; \
1217 } while (0)
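
/*
 * Illustrative sketch (hypothetical decoder): how the fetch macro above is used.
 * On a fetch failure (e.g. a #PF raised while reading the opcode bytes) the
 * macro returns the strict status code from the calling function automatically.
 */
#if 0 /* example only, never compiled */
FNIEMOP_DEF(iemOp_example_with_modrm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* bails out of iemOp_example_with_modrm on failure */
    NOREF(bRm);
    return VINF_SUCCESS;
}
#endif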
1218
1219
1220/**
1221 * Fetches the next signed byte from the opcode stream.
1222 *
1223 * @returns Strict VBox status code.
1224 * @param pIemCpu The IEM state.
1225 * @param pi8 Where to return the signed byte.
1226 */
1227DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1228{
1229 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1230}
1231
1232
1233/**
1234 * Fetches the next signed byte from the opcode stream, returning automatically
1235 * on failure.
1236 *
1237 * @param pi8 Where to return the signed byte.
1238 * @remark Implicitly references pIemCpu.
1239 */
1240#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1241 do \
1242 { \
1243 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1244 if (rcStrict2 != VINF_SUCCESS) \
1245 return rcStrict2; \
1246 } while (0)
1247
1248
1249/**
1250 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1251 *
1252 * @returns Strict VBox status code.
1253 * @param pIemCpu The IEM state.
1254 * @param pu16 Where to return the opcode word (sign-extended byte).
1255 */
1256DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1257{
1258 uint8_t u8;
1259 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1260 if (rcStrict == VINF_SUCCESS)
1261 *pu16 = (int8_t)u8;
1262 return rcStrict;
1263}
1264
1265
1266/**
1267 * Fetches the next signed byte from the opcode stream, extending it to
1268 * unsigned 16-bit.
1269 *
1270 * @returns Strict VBox status code.
1271 * @param pIemCpu The IEM state.
1272 * @param pu16 Where to return the unsigned word.
1273 */
1274DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1275{
1276 uint8_t const offOpcode = pIemCpu->offOpcode;
1277 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1278 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1279
1280 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1281 pIemCpu->offOpcode = offOpcode + 1;
1282 return VINF_SUCCESS;
1283}
1284
1285
1286/**
1287 * Fetches the next signed byte from the opcode stream and sign-extends it to
1288 * a word, returning automatically on failure.
1289 *
1290 * @param pu16 Where to return the word.
1291 * @remark Implicitly references pIemCpu.
1292 */
1293#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1294 do \
1295 { \
1296 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1297 if (rcStrict2 != VINF_SUCCESS) \
1298 return rcStrict2; \
1299 } while (0)
1300
1301
1302/**
1303 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1304 *
1305 * @returns Strict VBox status code.
1306 * @param pIemCpu The IEM state.
1307 * @param pu32 Where to return the opcode dword.
1308 */
1309DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1310{
1311 uint8_t u8;
1312 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1313 if (rcStrict == VINF_SUCCESS)
1314 *pu32 = (int8_t)u8;
1315 return rcStrict;
1316}
1317
1318
1319/**
1320 * Fetches the next signed byte from the opcode stream, extending it to
1321 * unsigned 32-bit.
1322 *
1323 * @returns Strict VBox status code.
1324 * @param pIemCpu The IEM state.
1325 * @param pu32 Where to return the unsigned dword.
1326 */
1327DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1328{
1329 uint8_t const offOpcode = pIemCpu->offOpcode;
1330 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1331 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1332
1333 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1334 pIemCpu->offOpcode = offOpcode + 1;
1335 return VINF_SUCCESS;
1336}
1337
1338
1339/**
1340 * Fetches the next signed byte from the opcode stream and sign-extends it to
1341 * a double word, returning automatically on failure.
1342 *
1343 * @param pu32 Where to return the double word.
1344 * @remark Implicitly references pIemCpu.
1345 */
1346#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1347 do \
1348 { \
1349 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1350 if (rcStrict2 != VINF_SUCCESS) \
1351 return rcStrict2; \
1352 } while (0)
1353
1354
1355/**
1356 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1357 *
1358 * @returns Strict VBox status code.
1359 * @param pIemCpu The IEM state.
1360 * @param pu64 Where to return the opcode qword.
1361 */
1362DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1363{
1364 uint8_t u8;
1365 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1366 if (rcStrict == VINF_SUCCESS)
1367 *pu64 = (int8_t)u8;
1368 return rcStrict;
1369}
1370
1371
1372/**
1373 * Fetches the next signed byte from the opcode stream, extending it to
1374 * unsigned 64-bit.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pIemCpu The IEM state.
1378 * @param pu64 Where to return the unsigned qword.
1379 */
1380DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1381{
1382 uint8_t const offOpcode = pIemCpu->offOpcode;
1383 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1384 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1385
1386 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1387 pIemCpu->offOpcode = offOpcode + 1;
1388 return VINF_SUCCESS;
1389}
1390
1391
1392/**
1393 * Fetches the next signed byte from the opcode stream and sign-extends it to
1394 * a quad word, returning automatically on failure.
1395 *
1396 * @param pu64 Where to return the quad word.
1397 * @remark Implicitly references pIemCpu.
1398 */
1399#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1400 do \
1401 { \
1402 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1403 if (rcStrict2 != VINF_SUCCESS) \
1404 return rcStrict2; \
1405 } while (0)
1406
1407
1408/**
1409 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1410 *
1411 * @returns Strict VBox status code.
1412 * @param pIemCpu The IEM state.
1413 * @param pu16 Where to return the opcode word.
1414 */
1415DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1416{
1417 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1418 if (rcStrict == VINF_SUCCESS)
1419 {
1420 uint8_t offOpcode = pIemCpu->offOpcode;
1421 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1422 pIemCpu->offOpcode = offOpcode + 2;
1423 }
1424 else
1425 *pu16 = 0;
1426 return rcStrict;
1427}
1428
1429
1430/**
1431 * Fetches the next opcode word.
1432 *
1433 * @returns Strict VBox status code.
1434 * @param pIemCpu The IEM state.
1435 * @param pu16 Where to return the opcode word.
1436 */
1437DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1438{
1439 uint8_t const offOpcode = pIemCpu->offOpcode;
1440 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1441 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1442
1443 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1444 pIemCpu->offOpcode = offOpcode + 2;
1445 return VINF_SUCCESS;
1446}
1447
1448
1449/**
1450 * Fetches the next opcode word, returns automatically on failure.
1451 *
1452 * @param a_pu16 Where to return the opcode word.
1453 * @remark Implicitly references pIemCpu.
1454 */
1455#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1456 do \
1457 { \
1458 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1459 if (rcStrict2 != VINF_SUCCESS) \
1460 return rcStrict2; \
1461 } while (0)
1462
1463
1464/**
1465 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1466 *
1467 * @returns Strict VBox status code.
1468 * @param pIemCpu The IEM state.
1469 * @param pu32 Where to return the opcode double word.
1470 */
1471DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1472{
1473 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1474 if (rcStrict == VINF_SUCCESS)
1475 {
1476 uint8_t offOpcode = pIemCpu->offOpcode;
1477 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1478 pIemCpu->offOpcode = offOpcode + 2;
1479 }
1480 else
1481 *pu32 = 0;
1482 return rcStrict;
1483}
1484
1485
1486/**
1487 * Fetches the next opcode word, zero extending it to a double word.
1488 *
1489 * @returns Strict VBox status code.
1490 * @param pIemCpu The IEM state.
1491 * @param pu32 Where to return the opcode double word.
1492 */
1493DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1494{
1495 uint8_t const offOpcode = pIemCpu->offOpcode;
1496 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1497 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1498
1499 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1500 pIemCpu->offOpcode = offOpcode + 2;
1501 return VINF_SUCCESS;
1502}
1503
1504
1505/**
1506 * Fetches the next opcode word and zero extends it to a double word, returns
1507 * automatically on failure.
1508 *
1509 * @param a_pu32 Where to return the opcode double word.
1510 * @remark Implicitly references pIemCpu.
1511 */
1512#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1513 do \
1514 { \
1515 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1516 if (rcStrict2 != VINF_SUCCESS) \
1517 return rcStrict2; \
1518 } while (0)
1519
1520
1521/**
1522 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1523 *
1524 * @returns Strict VBox status code.
1525 * @param pIemCpu The IEM state.
1526 * @param pu64 Where to return the opcode quad word.
1527 */
1528DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1529{
1530 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1531 if (rcStrict == VINF_SUCCESS)
1532 {
1533 uint8_t offOpcode = pIemCpu->offOpcode;
1534 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1535 pIemCpu->offOpcode = offOpcode + 2;
1536 }
1537 else
1538 *pu64 = 0;
1539 return rcStrict;
1540}
1541
1542
1543/**
1544 * Fetches the next opcode word, zero extending it to a quad word.
1545 *
1546 * @returns Strict VBox status code.
1547 * @param pIemCpu The IEM state.
1548 * @param pu64 Where to return the opcode quad word.
1549 */
1550DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1551{
1552 uint8_t const offOpcode = pIemCpu->offOpcode;
1553 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1554 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1555
1556 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1557 pIemCpu->offOpcode = offOpcode + 2;
1558 return VINF_SUCCESS;
1559}
1560
1561
1562/**
1563 * Fetches the next opcode word and zero extends it to a quad word, returns
1564 * automatically on failure.
1565 *
1566 * @param a_pu64 Where to return the opcode quad word.
1567 * @remark Implicitly references pIemCpu.
1568 */
1569#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1570 do \
1571 { \
1572 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1573 if (rcStrict2 != VINF_SUCCESS) \
1574 return rcStrict2; \
1575 } while (0)
1576
1577
1578/**
1579 * Fetches the next signed word from the opcode stream.
1580 *
1581 * @returns Strict VBox status code.
1582 * @param pIemCpu The IEM state.
1583 * @param pi16 Where to return the signed word.
1584 */
1585DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1586{
1587 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1588}
1589
1590
1591/**
1592 * Fetches the next signed word from the opcode stream, returning automatically
1593 * on failure.
1594 *
1595 * @param a_pi16 Where to return the signed word.
1596 * @remark Implicitly references pIemCpu.
1597 */
1598#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1599 do \
1600 { \
1601 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1602 if (rcStrict2 != VINF_SUCCESS) \
1603 return rcStrict2; \
1604 } while (0)
1605
1606
1607/**
1608 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1609 *
1610 * @returns Strict VBox status code.
1611 * @param pIemCpu The IEM state.
1612 * @param pu32 Where to return the opcode dword.
1613 */
1614DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1615{
1616 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1617 if (rcStrict == VINF_SUCCESS)
1618 {
1619 uint8_t offOpcode = pIemCpu->offOpcode;
1620 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1621 pIemCpu->abOpcode[offOpcode + 1],
1622 pIemCpu->abOpcode[offOpcode + 2],
1623 pIemCpu->abOpcode[offOpcode + 3]);
1624 pIemCpu->offOpcode = offOpcode + 4;
1625 }
1626 else
1627 *pu32 = 0;
1628 return rcStrict;
1629}
1630
1631
1632/**
1633 * Fetches the next opcode dword.
1634 *
1635 * @returns Strict VBox status code.
1636 * @param pIemCpu The IEM state.
1637 * @param pu32 Where to return the opcode double word.
1638 */
1639DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1640{
1641 uint8_t const offOpcode = pIemCpu->offOpcode;
1642 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1643 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1644
1645 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1646 pIemCpu->abOpcode[offOpcode + 1],
1647 pIemCpu->abOpcode[offOpcode + 2],
1648 pIemCpu->abOpcode[offOpcode + 3]);
1649 pIemCpu->offOpcode = offOpcode + 4;
1650 return VINF_SUCCESS;
1651}
1652
1653
1654/**
1655 * Fetches the next opcode dword, returns automatically on failure.
1656 *
1657 * @param a_pu32 Where to return the opcode dword.
1658 * @remark Implicitly references pIemCpu.
1659 */
1660#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1661 do \
1662 { \
1663 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1664 if (rcStrict2 != VINF_SUCCESS) \
1665 return rcStrict2; \
1666 } while (0)
1667
1668
1669/**
1670 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1671 *
1672 * @returns Strict VBox status code.
1673 * @param pIemCpu The IEM state.
1674 * @param pu64 Where to return the opcode quad word.
1675 */
1676DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1677{
1678 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1679 if (rcStrict == VINF_SUCCESS)
1680 {
1681 uint8_t offOpcode = pIemCpu->offOpcode;
1682 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1683 pIemCpu->abOpcode[offOpcode + 1],
1684 pIemCpu->abOpcode[offOpcode + 2],
1685 pIemCpu->abOpcode[offOpcode + 3]);
1686 pIemCpu->offOpcode = offOpcode + 4;
1687 }
1688 else
1689 *pu64 = 0;
1690 return rcStrict;
1691}
1692
1693
1694/**
1695 * Fetches the next opcode dword, zero extending it to a quad word.
1696 *
1697 * @returns Strict VBox status code.
1698 * @param pIemCpu The IEM state.
1699 * @param pu64 Where to return the opcode quad word.
1700 */
1701DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1702{
1703 uint8_t const offOpcode = pIemCpu->offOpcode;
1704 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1705 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1706
1707 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1708 pIemCpu->abOpcode[offOpcode + 1],
1709 pIemCpu->abOpcode[offOpcode + 2],
1710 pIemCpu->abOpcode[offOpcode + 3]);
1711 pIemCpu->offOpcode = offOpcode + 4;
1712 return VINF_SUCCESS;
1713}
1714
1715
1716/**
1717 * Fetches the next opcode dword and zero extends it to a quad word, returns
1718 * automatically on failure.
1719 *
1720 * @param a_pu64 Where to return the opcode quad word.
1721 * @remark Implicitly references pIemCpu.
1722 */
1723#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1724 do \
1725 { \
1726 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1727 if (rcStrict2 != VINF_SUCCESS) \
1728 return rcStrict2; \
1729 } while (0)
1730
1731
1732/**
1733 * Fetches the next signed double word from the opcode stream.
1734 *
1735 * @returns Strict VBox status code.
1736 * @param pIemCpu The IEM state.
1737 * @param pi32 Where to return the signed double word.
1738 */
1739DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1740{
1741 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1742}
1743
1744/**
1745 * Fetches the next signed double word from the opcode stream, returning
1746 * automatically on failure.
1747 *
1748 * @param a_pi32 Where to return the signed double word.
1749 * @remark Implicitly references pIemCpu.
1750 */
1751#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1752 do \
1753 { \
1754 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1755 if (rcStrict2 != VINF_SUCCESS) \
1756 return rcStrict2; \
1757 } while (0)
1758
1759
1760/**
1761 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1762 *
1763 * @returns Strict VBox status code.
1764 * @param pIemCpu The IEM state.
1765 * @param pu64 Where to return the opcode qword.
1766 */
1767DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1768{
1769 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1770 if (rcStrict == VINF_SUCCESS)
1771 {
1772 uint8_t offOpcode = pIemCpu->offOpcode;
1773 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1774 pIemCpu->abOpcode[offOpcode + 1],
1775 pIemCpu->abOpcode[offOpcode + 2],
1776 pIemCpu->abOpcode[offOpcode + 3]);
1777 pIemCpu->offOpcode = offOpcode + 4;
1778 }
1779 else
1780 *pu64 = 0;
1781 return rcStrict;
1782}
1783
1784
1785/**
1786 * Fetches the next opcode dword, sign extending it into a quad word.
1787 *
1788 * @returns Strict VBox status code.
1789 * @param pIemCpu The IEM state.
1790 * @param pu64 Where to return the opcode quad word.
1791 */
1792DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1793{
1794 uint8_t const offOpcode = pIemCpu->offOpcode;
1795 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1796 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1797
1798 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1799 pIemCpu->abOpcode[offOpcode + 1],
1800 pIemCpu->abOpcode[offOpcode + 2],
1801 pIemCpu->abOpcode[offOpcode + 3]);
1802 *pu64 = i32;
1803 pIemCpu->offOpcode = offOpcode + 4;
1804 return VINF_SUCCESS;
1805}
1806
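/*
 * Worked example of the sign extension above, for illustration: the opcode
 * bytes FE FF FF FF give i32 = -2 (0xfffffffe), so *pu64 ends up as
 * UINT64_C(0xfffffffffffffffe) rather than UINT64_C(0x00000000fffffffe).
 */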
1807
1808/**
1809 * Fetches the next opcode double word and sign extends it to a quad word,
1810 * returns automatically on failure.
1811 *
1812 * @param a_pu64 Where to return the opcode quad word.
1813 * @remark Implicitly references pIemCpu.
1814 */
1815#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1816 do \
1817 { \
1818 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1819 if (rcStrict2 != VINF_SUCCESS) \
1820 return rcStrict2; \
1821 } while (0)
1822
1823
1824/**
1825 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1826 *
1827 * @returns Strict VBox status code.
1828 * @param pIemCpu The IEM state.
1829 * @param pu64 Where to return the opcode qword.
1830 */
1831DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1832{
1833 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1834 if (rcStrict == VINF_SUCCESS)
1835 {
1836 uint8_t offOpcode = pIemCpu->offOpcode;
1837 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1838 pIemCpu->abOpcode[offOpcode + 1],
1839 pIemCpu->abOpcode[offOpcode + 2],
1840 pIemCpu->abOpcode[offOpcode + 3],
1841 pIemCpu->abOpcode[offOpcode + 4],
1842 pIemCpu->abOpcode[offOpcode + 5],
1843 pIemCpu->abOpcode[offOpcode + 6],
1844 pIemCpu->abOpcode[offOpcode + 7]);
1845 pIemCpu->offOpcode = offOpcode + 8;
1846 }
1847 else
1848 *pu64 = 0;
1849 return rcStrict;
1850}
1851
1852
1853/**
1854 * Fetches the next opcode qword.
1855 *
1856 * @returns Strict VBox status code.
1857 * @param pIemCpu The IEM state.
1858 * @param pu64 Where to return the opcode qword.
1859 */
1860DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1861{
1862 uint8_t const offOpcode = pIemCpu->offOpcode;
1863 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1864 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1865
1866 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1867 pIemCpu->abOpcode[offOpcode + 1],
1868 pIemCpu->abOpcode[offOpcode + 2],
1869 pIemCpu->abOpcode[offOpcode + 3],
1870 pIemCpu->abOpcode[offOpcode + 4],
1871 pIemCpu->abOpcode[offOpcode + 5],
1872 pIemCpu->abOpcode[offOpcode + 6],
1873 pIemCpu->abOpcode[offOpcode + 7]);
1874 pIemCpu->offOpcode = offOpcode + 8;
1875 return VINF_SUCCESS;
1876}
1877
1878
1879/**
1880 * Fetches the next opcode quad word, returns automatically on failure.
1881 *
1882 * @param a_pu64 Where to return the opcode quad word.
1883 * @remark Implicitly references pIemCpu.
1884 */
1885#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1886 do \
1887 { \
1888 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1889 if (rcStrict2 != VINF_SUCCESS) \
1890 return rcStrict2; \
1891 } while (0)
1892
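/*
 * Usage sketch for the IEM_OPCODE_GET_NEXT_XXX macros above.  The decoder
 * function name and the immediate layout below are made up for illustration;
 * the point is that the macros must be used from a function returning
 * VBOXSTRICTRC with a pIemCpu variable in scope, since they return from the
 * caller on fetch failure:
 *
 *     static VBOXSTRICTRC iemOpHypotheticalImm32(PIEMCPU pIemCpu)
 *     {
 *         uint32_t u32Imm;
 *         IEM_OPCODE_GET_NEXT_U32(&u32Imm);  // returns rcStrict2 on failure
 *         // ... decode/execute using u32Imm ...
 *         return VINF_SUCCESS;
 *     }
 */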
1893
1894/** @name Misc Worker Functions.
1895 * @{
1896 */
1897
1898
1899/**
1900 * Validates a new SS segment.
1901 *
1902 * @returns VBox strict status code.
1903 * @param pIemCpu The IEM per CPU instance data.
1904 * @param pCtx The CPU context.
1905 * @param NewSS The new SS selector.
1906 * @param uCpl The CPL to load the stack for.
1907 * @param pDesc Where to return the descriptor.
1908 */
1909static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1910{
1911 NOREF(pCtx);
1912
1913 /* Null selectors are not allowed (we're not called for dispatching
1914 interrupts with SS=0 in long mode). */
1915 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1916 {
1917        Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1918 return iemRaiseTaskSwitchFault0(pIemCpu);
1919 }
1920
1921 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1922 if ((NewSS & X86_SEL_RPL) != uCpl)
1923 {
1924        Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1925 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1926 }
1927
1928 /*
1929 * Read the descriptor.
1930 */
1931 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1932 if (rcStrict != VINF_SUCCESS)
1933 return rcStrict;
1934
1935 /*
1936 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1937 */
1938 if (!pDesc->Legacy.Gen.u1DescType)
1939 {
1940        Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1941 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1942 }
1943
1944 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1945 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1946 {
1947 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1948 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1949 }
1950 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1951 {
1952        Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1953 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1954 }
1955
1956 /* Is it there? */
1957 /** @todo testcase: Is this checked before the canonical / limit check below? */
1958 if (!pDesc->Legacy.Gen.u1Present)
1959 {
1960 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1961 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1962 }
1963
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/**
1969 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1970 * not.
1971 *
1972 * @param a_pIemCpu The IEM per CPU data.
1973 * @param a_pCtx The CPU context.
1974 */
1975#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1976# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1977 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1978 ? (a_pCtx)->eflags.u \
1979 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1980#else
1981# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1982 ( (a_pCtx)->eflags.u )
1983#endif
1984
1985/**
1986 * Updates the EFLAGS in the correct manner wrt. PATM.
1987 *
1988 * @param a_pIemCpu The IEM per CPU data.
1989 * @param a_pCtx The CPU context.
1990 */
1991#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1992# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
1993 do { \
1994 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
1995 (a_pCtx)->eflags.u = (a_fEfl); \
1996 else \
1997 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
1998 } while (0)
1999#else
2000# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2001 do { \
2002 (a_pCtx)->eflags.u = (a_fEfl); \
2003 } while (0)
2004#endif
2005
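/*
 * Typical read-modify-write use of the accessors above (this is essentially
 * what the exception delivery code further down does when masking IF):
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */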
2006
2007/** @} */
2008
2009/** @name Raising Exceptions.
2010 *
2011 * @{
2012 */
2013
2014/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2015 * @{ */
2016/** CPU exception. */
2017#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2018/** External interrupt (from PIC, APIC, whatever). */
2019#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2020/** Software interrupt (int or into, not bound).
2021 * Returns to the following instruction. */
2022#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2023/** Takes an error code. */
2024#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2025/** Takes a CR2. */
2026#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2027/** Generated by the breakpoint instruction. */
2028#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2029/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2030#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2031/** @} */
2032
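/*
 * Illustration of how these flags are meant to be combined (the actual call
 * sites live elsewhere in IEM): a page fault would typically be raised with
 * IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2, since
 * #PF both pushes an error code and reports the faulting address in CR2,
 * whereas a software interrupt raised by INT includes IEM_XCPT_FLAGS_T_SOFT_INT
 * so the return address points past the instruction.
 */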
2033
2034/**
2035 * Loads the specified stack far pointer from the TSS.
2036 *
2037 * @returns VBox strict status code.
2038 * @param pIemCpu The IEM per CPU instance data.
2039 * @param pCtx The CPU context.
2040 * @param uCpl The CPL to load the stack for.
2041 * @param pSelSS Where to return the new stack segment.
2042 * @param puEsp Where to return the new stack pointer.
2043 */
2044static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2045 PRTSEL pSelSS, uint32_t *puEsp)
2046{
2047 VBOXSTRICTRC rcStrict;
2048 Assert(uCpl < 4);
2049 *puEsp = 0; /* make gcc happy */
2050 *pSelSS = 0; /* make gcc happy */
2051
2052 switch (pCtx->tr.Attr.n.u4Type)
2053 {
2054 /*
2055 * 16-bit TSS (X86TSS16).
2056 */
2057 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2058 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2059 {
2060 uint32_t off = uCpl * 4 + 2;
2061 if (off + 4 > pCtx->tr.u32Limit)
2062 {
2063 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2064 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2065 }
2066
2067 uint32_t u32Tmp = 0; /* gcc maybe... */
2068 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2069 if (rcStrict == VINF_SUCCESS)
2070 {
2071 *puEsp = RT_LOWORD(u32Tmp);
2072 *pSelSS = RT_HIWORD(u32Tmp);
2073 return VINF_SUCCESS;
2074 }
2075 break;
2076 }
2077
2078 /*
2079 * 32-bit TSS (X86TSS32).
2080 */
2081 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2082 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2083 {
2084 uint32_t off = uCpl * 8 + 4;
2085 if (off + 7 > pCtx->tr.u32Limit)
2086 {
2087                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2088 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2089 }
2090
2091 uint64_t u64Tmp;
2092 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2093 if (rcStrict == VINF_SUCCESS)
2094 {
2095 *puEsp = u64Tmp & UINT32_MAX;
2096 *pSelSS = (RTSEL)(u64Tmp >> 32);
2097 return VINF_SUCCESS;
2098 }
2099 break;
2100 }
2101
2102 default:
2103 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2104 }
2105 return rcStrict;
2106}
2107
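/*
 * Worked example for the offset math above: in a 32-bit TSS the {ESPn, SSn}
 * pairs start at offset 4 and are 8 bytes apart, so for uCpl=1 the code reads
 * 8 bytes at 1*8 + 4 = 12 and gets ESP1 in the low dword and SS1 in the word
 * above it.  In a 16-bit TSS the {SPn, SSn} pairs start at offset 2 and are
 * 4 bytes apart, hence the uCpl*4 + 2 indexing.
 */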
2108
2109/**
2110 * Loads the specified stack pointer from the 64-bit TSS.
2111 *
2112 * @returns VBox strict status code.
2113 * @param pIemCpu The IEM per CPU instance data.
2114 * @param pCtx The CPU context.
2115 * @param uCpl The CPL to load the stack for.
2116 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2117 * @param puRsp Where to return the new stack pointer.
2118 */
2119static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
2120 uint64_t *puRsp)
2121{
2122 Assert(uCpl < 4);
2123 Assert(uIst < 8);
2124 *puRsp = 0; /* make gcc happy */
2125
2126 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2127
2128 uint32_t off;
2129 if (uIst)
2130 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2131 else
2132 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2133 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2134 {
2135 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2136 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2137 }
2138
2139 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2140}
2141
2142
2143/**
2144 * Adjust the CPU state according to the exception being raised.
2145 *
2146 * @param pCtx The CPU context.
2147 * @param u8Vector The exception that has been raised.
2148 */
2149DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2150{
2151 switch (u8Vector)
2152 {
2153 case X86_XCPT_DB:
2154 pCtx->dr[7] &= ~X86_DR7_GD;
2155 break;
2156 /** @todo Read the AMD and Intel exception reference... */
2157 }
2158}
2159
2160
2161/**
2162 * Implements exceptions and interrupts for real mode.
2163 *
2164 * @returns VBox strict status code.
2165 * @param pIemCpu The IEM per CPU instance data.
2166 * @param pCtx The CPU context.
2167 * @param cbInstr The number of bytes to offset rIP by in the return
2168 * address.
2169 * @param u8Vector The interrupt / exception vector number.
2170 * @param fFlags The flags.
2171 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2172 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2173 */
2174static VBOXSTRICTRC
2175iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2176 PCPUMCTX pCtx,
2177 uint8_t cbInstr,
2178 uint8_t u8Vector,
2179 uint32_t fFlags,
2180 uint16_t uErr,
2181 uint64_t uCr2)
2182{
2183 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2184 NOREF(uErr); NOREF(uCr2);
2185
2186 /*
2187 * Read the IDT entry.
2188 */
2189 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2190 {
2191 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2192 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2193 }
2194 RTFAR16 Idte;
2195 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2196 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2197 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2198 return rcStrict;
2199
2200 /*
2201 * Push the stack frame.
2202 */
2203 uint16_t *pu16Frame;
2204 uint64_t uNewRsp;
2205 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2206 if (rcStrict != VINF_SUCCESS)
2207 return rcStrict;
2208
2209 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2210 pu16Frame[2] = (uint16_t)fEfl;
2211 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2212 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2213 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2214 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2215 return rcStrict;
2216
2217 /*
2218 * Load the vector address into cs:ip and make exception specific state
2219 * adjustments.
2220 */
2221 pCtx->cs.Sel = Idte.sel;
2222 pCtx->cs.ValidSel = Idte.sel;
2223 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2224 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2225 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2226 pCtx->rip = Idte.off;
2227 fEfl &= ~X86_EFL_IF;
2228 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2229
2230 /** @todo do we actually do this in real mode? */
2231 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2232 iemRaiseXcptAdjustState(pCtx, u8Vector);
2233
2234 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2235}
2236
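/*
 * For reference on the real-mode dispatch above: with the CPU in real mode the
 * IDT is the classic interrupt vector table, so vector N occupies 4 bytes at
 * IDTR.base + N*4 (handler offset in the low word, segment in the high word,
 * matching the RTFAR16 fetch); e.g. vector 8 is read from base + 32.  The
 * three words pushed are FLAGS, CS and the return IP, i.e. pu16Frame[2..0].
 */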
2237
2238/**
2239 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2240 *
2241 * @param pIemCpu The IEM per CPU instance data.
2242 * @param pSReg Pointer to the segment register.
2243 */
2244static void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2245{
2246 pSReg->Sel = 0;
2247 pSReg->ValidSel = 0;
2248 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2249 {
2250        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2251 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2252 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2253 }
2254 else
2255 {
2256 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2257 /** @todo check this on AMD-V */
2258 pSReg->u64Base = 0;
2259 pSReg->u32Limit = 0;
2260 }
2261}
2262
2263
2264/**
2265 * Loads a segment selector during a task switch in V8086 mode.
2266 *
2267 * @param pIemCpu The IEM per CPU instance data.
2268 * @param pSReg Pointer to the segment register.
2269 * @param uSel The selector value to load.
2270 */
2271static void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2272{
2273 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2274 pSReg->Sel = uSel;
2275 pSReg->ValidSel = uSel;
2276 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2277 pSReg->u64Base = uSel << 4;
2278 pSReg->u32Limit = 0xffff;
2279 pSReg->Attr.u = 0xf3;
2280}
2281
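/*
 * Worked example for the V8086 load above: uSel = 0x1234 yields u64Base =
 * 0x12340 (selector * 16), u32Limit = 0xffff and Attr.u = 0xf3, i.e. a
 * present, DPL=3, accessed read/write data segment, which is the fixed
 * real/V8086-style mapping.
 */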
2282
2283/**
2284 * Loads a NULL data selector into a selector register, both the hidden and
2285 * visible parts, in protected mode.
2286 *
2287 * @param pIemCpu The IEM state of the calling EMT.
2288 * @param pSReg Pointer to the segment register.
2289 * @param uRpl The RPL.
2290 */
2291static void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2292{
2293    /** @todo Testcase: write a testcase checking what happens when loading a NULL
2294 * data selector in protected mode. */
2295 pSReg->Sel = uRpl;
2296 pSReg->ValidSel = uRpl;
2297 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2298 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2299 {
2300 /* VT-x (Intel 3960x) observed doing something like this. */
2301 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2302 pSReg->u32Limit = UINT32_MAX;
2303 pSReg->u64Base = 0;
2304 }
2305 else
2306 {
2307 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2308 pSReg->u32Limit = 0;
2309 pSReg->u64Base = 0;
2310 }
2311}
2312
2313
2314/**
2315 * Loads a segment selector during a task switch in protected mode. In this task
2316 * switch scenario, we would throw #TS exceptions rather than #GPs.
2317 *
2318 * @returns VBox strict status code.
2319 * @param pIemCpu The IEM per CPU instance data.
2320 * @param pSReg Pointer to the segment register.
2321 * @param uSel The new selector value.
2322 *
2323 * @remarks This does -NOT- handle CS or SS.
2324 * @remarks This expects pIemCpu->uCpl to be up to date.
2325 */
2326static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2327{
2328 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2329
2330 /* Null data selector. */
2331 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2332 {
2333 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2335 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2336 return VINF_SUCCESS;
2337 }
2338
2339 /* Fetch the descriptor. */
2340 IEMSELDESC Desc;
2341 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2342 if (rcStrict != VINF_SUCCESS)
2343 {
2344 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2345 VBOXSTRICTRC_VAL(rcStrict)));
2346 return rcStrict;
2347 }
2348
2349 /* Must be a data segment or readable code segment. */
2350 if ( !Desc.Legacy.Gen.u1DescType
2351 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2352 {
2353 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2354 Desc.Legacy.Gen.u4Type));
2355 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2356 }
2357
2358 /* Check privileges for data segments and non-conforming code segments. */
2359 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2360 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2361 {
2362 /* The RPL and the new CPL must be less than or equal to the DPL. */
2363 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2364 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2365 {
2366 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2367 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2368 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2369 }
2370 }
2371
2372 /* Is it there? */
2373 if (!Desc.Legacy.Gen.u1Present)
2374 {
2375 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2376 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2377 }
2378
2379 /* The base and limit. */
2380 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2381 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2382
2383 /*
2384 * Ok, everything checked out fine. Now set the accessed bit before
2385 * committing the result into the registers.
2386 */
2387 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2388 {
2389 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2390 if (rcStrict != VINF_SUCCESS)
2391 return rcStrict;
2392 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2393 }
2394
2395 /* Commit */
2396 pSReg->Sel = uSel;
2397 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2398 pSReg->u32Limit = cbLimit;
2399 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2400 pSReg->ValidSel = uSel;
2401 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2402 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2403 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2404
2405 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2406 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2407 return VINF_SUCCESS;
2408}
2409
2410
2411/**
2412 * Performs a task switch.
2413 *
2414 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2415 * caller is responsible for performing the necessary checks (like DPL, TSS
2416 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2417 * reference for JMP, CALL, IRET.
2418 *
2419 * If the task switch is due to a software interrupt or hardware exception,
2420 * the caller is responsible for validating the TSS selector and descriptor. See
2421 * Intel Instruction reference for INT n.
2422 *
2423 * @returns VBox strict status code.
2424 * @param pIemCpu The IEM per CPU instance data.
2425 * @param pCtx The CPU context.
2426 * @param enmTaskSwitch What caused this task switch.
2427 * @param uNextEip The EIP effective after the task switch.
2428 * @param fFlags The flags.
2429 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2430 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2431 * @param SelTSS The TSS selector of the new task.
2432 * @param pNewDescTSS Pointer to the new TSS descriptor.
2433 */
2434static VBOXSTRICTRC iemTaskSwitch(PIEMCPU pIemCpu,
2435 PCPUMCTX pCtx,
2436 IEMTASKSWITCH enmTaskSwitch,
2437 uint32_t uNextEip,
2438 uint32_t fFlags,
2439 uint16_t uErr,
2440 uint64_t uCr2,
2441 RTSEL SelTSS,
2442 PIEMSELDESC pNewDescTSS)
2443{
2444 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2445 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2446
2447 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2448 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2449 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2450 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2451 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2452
2453 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2454 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2455
2456 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2457 fIsNewTSS386, pCtx->eip, uNextEip));
2458
2459 /* Update CR2 in case it's a page-fault. */
2460 /** @todo This should probably be done much earlier in IEM/PGM. See
2461 * @bugref{5653} comment #49. */
2462 if (fFlags & IEM_XCPT_FLAGS_CR2)
2463 pCtx->cr2 = uCr2;
2464
2465 /*
2466 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2467 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2468 */
2469 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2470 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2471 if (uNewTSSLimit < uNewTSSLimitMin)
2472 {
2473 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2474 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2475 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2476 }
2477
2478 /*
2479 * Check the current TSS limit. The last written byte to the current TSS during the
2480 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2481 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2482 *
2483     * The AMD docs don't mention anything about limit checks with LTR, which suggests
2484     * you can end up with smaller than "legal" TSS limits.
2485 */
2486 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2487 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2488 if (uCurTSSLimit < uCurTSSLimitMin)
2489 {
2490 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2491 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2492 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2493 }
2494
2495 /*
2496 * Verify that the new TSS can be accessed and map it. Map only the required contents
2497 * and not the entire TSS.
2498 */
2499 void *pvNewTSS;
2500 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2501 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2502 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2503 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2504 * not perform correct translation if this happens. See Intel spec. 7.2.1
2505 * "Task-State Segment" */
2506 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2507 if (rcStrict != VINF_SUCCESS)
2508 {
2509 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2510 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2511 return rcStrict;
2512 }
2513
2514 /*
2515 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2516 */
2517 uint32_t u32EFlags = pCtx->eflags.u32;
2518 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2519 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2520 {
2521 PX86DESC pDescCurTSS;
2522 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2523 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2524 if (rcStrict != VINF_SUCCESS)
2525 {
2526            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2527 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2528 return rcStrict;
2529 }
2530
2531 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2532 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2533 if (rcStrict != VINF_SUCCESS)
2534 {
2535            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2536 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2537 return rcStrict;
2538 }
2539
2540 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2541 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2542 {
2543 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2544 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2545 u32EFlags &= ~X86_EFL_NT;
2546 }
2547 }
2548
2549 /*
2550 * Save the CPU state into the current TSS.
2551 */
2552 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2553 if (GCPtrNewTSS == GCPtrCurTSS)
2554 {
2555 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2556 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2557 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2558 }
2559 if (fIsNewTSS386)
2560 {
2561 /*
2562 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2563 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2564 */
2565 void *pvCurTSS32;
2566 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2567 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2568 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2569 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2570 if (rcStrict != VINF_SUCCESS)
2571 {
2572 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2573 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2574 return rcStrict;
2575 }
2576
2577        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2578 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2579 pCurTSS32->eip = uNextEip;
2580 pCurTSS32->eflags = u32EFlags;
2581 pCurTSS32->eax = pCtx->eax;
2582 pCurTSS32->ecx = pCtx->ecx;
2583 pCurTSS32->edx = pCtx->edx;
2584 pCurTSS32->ebx = pCtx->ebx;
2585 pCurTSS32->esp = pCtx->esp;
2586 pCurTSS32->ebp = pCtx->ebp;
2587 pCurTSS32->esi = pCtx->esi;
2588 pCurTSS32->edi = pCtx->edi;
2589 pCurTSS32->es = pCtx->es.Sel;
2590 pCurTSS32->cs = pCtx->cs.Sel;
2591 pCurTSS32->ss = pCtx->ss.Sel;
2592 pCurTSS32->ds = pCtx->ds.Sel;
2593 pCurTSS32->fs = pCtx->fs.Sel;
2594 pCurTSS32->gs = pCtx->gs.Sel;
2595
2596 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2597 if (rcStrict != VINF_SUCCESS)
2598 {
2599 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2600 VBOXSTRICTRC_VAL(rcStrict)));
2601 return rcStrict;
2602 }
2603 }
2604 else
2605 {
2606 /*
2607 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2608 */
2609 void *pvCurTSS16;
2610 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2611 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2612 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2613 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2614 if (rcStrict != VINF_SUCCESS)
2615 {
2616 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2617 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2618 return rcStrict;
2619 }
2620
2621        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2622 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2623 pCurTSS16->ip = uNextEip;
2624 pCurTSS16->flags = u32EFlags;
2625 pCurTSS16->ax = pCtx->ax;
2626 pCurTSS16->cx = pCtx->cx;
2627 pCurTSS16->dx = pCtx->dx;
2628 pCurTSS16->bx = pCtx->bx;
2629 pCurTSS16->sp = pCtx->sp;
2630 pCurTSS16->bp = pCtx->bp;
2631 pCurTSS16->si = pCtx->si;
2632 pCurTSS16->di = pCtx->di;
2633 pCurTSS16->es = pCtx->es.Sel;
2634 pCurTSS16->cs = pCtx->cs.Sel;
2635 pCurTSS16->ss = pCtx->ss.Sel;
2636 pCurTSS16->ds = pCtx->ds.Sel;
2637
2638 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2639 if (rcStrict != VINF_SUCCESS)
2640 {
2641 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2642 VBOXSTRICTRC_VAL(rcStrict)));
2643 return rcStrict;
2644 }
2645 }
2646
2647 /*
2648 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2649 */
2650 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2651 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2652 {
2653        /* 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2654 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2655 pNewTSS->selPrev = pCtx->tr.Sel;
2656 }
2657
2658 /*
2659 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2660 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2661 */
2662 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2663 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2664 bool fNewDebugTrap;
2665 if (fIsNewTSS386)
2666 {
2667 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2668 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2669 uNewEip = pNewTSS32->eip;
2670 uNewEflags = pNewTSS32->eflags;
2671 uNewEax = pNewTSS32->eax;
2672 uNewEcx = pNewTSS32->ecx;
2673 uNewEdx = pNewTSS32->edx;
2674 uNewEbx = pNewTSS32->ebx;
2675 uNewEsp = pNewTSS32->esp;
2676 uNewEbp = pNewTSS32->ebp;
2677 uNewEsi = pNewTSS32->esi;
2678 uNewEdi = pNewTSS32->edi;
2679 uNewES = pNewTSS32->es;
2680 uNewCS = pNewTSS32->cs;
2681 uNewSS = pNewTSS32->ss;
2682 uNewDS = pNewTSS32->ds;
2683 uNewFS = pNewTSS32->fs;
2684 uNewGS = pNewTSS32->gs;
2685 uNewLdt = pNewTSS32->selLdt;
2686 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2687 }
2688 else
2689 {
2690 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2691 uNewCr3 = 0;
2692 uNewEip = pNewTSS16->ip;
2693 uNewEflags = pNewTSS16->flags;
2694 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2695 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2696 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2697 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2698 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2699 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2700 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2701 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2702 uNewES = pNewTSS16->es;
2703 uNewCS = pNewTSS16->cs;
2704 uNewSS = pNewTSS16->ss;
2705 uNewDS = pNewTSS16->ds;
2706 uNewFS = 0;
2707 uNewGS = 0;
2708 uNewLdt = pNewTSS16->selLdt;
2709 fNewDebugTrap = false;
2710 }
2711
2712 if (GCPtrNewTSS == GCPtrCurTSS)
2713 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2714 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2715
2716 /*
2717 * We're done accessing the new TSS.
2718 */
2719 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2720 if (rcStrict != VINF_SUCCESS)
2721 {
2722 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2723 return rcStrict;
2724 }
2725
2726 /*
2727 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2728 */
2729 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2730 {
2731 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2732 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2733 if (rcStrict != VINF_SUCCESS)
2734 {
2735 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2736 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2737 return rcStrict;
2738 }
2739
2740 /* Check that the descriptor indicates the new TSS is available (not busy). */
2741 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2742 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2743 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2744
2745 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2746 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2747 if (rcStrict != VINF_SUCCESS)
2748 {
2749 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2750 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2751 return rcStrict;
2752 }
2753 }
2754
2755 /*
2756 * From this point on, we're technically in the new task. We will defer exceptions
2757 * until the completion of the task switch but before executing any instructions in the new task.
2758 */
2759 pCtx->tr.Sel = SelTSS;
2760 pCtx->tr.ValidSel = SelTSS;
2761 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2762 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2763 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2764 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2765 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2766
2767 /* Set the busy bit in TR. */
2768 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2769 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2770 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2771 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2772 {
2773 uNewEflags |= X86_EFL_NT;
2774 }
2775
2776 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2777 pCtx->cr0 |= X86_CR0_TS;
2778 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2779
2780 pCtx->eip = uNewEip;
2781 pCtx->eax = uNewEax;
2782 pCtx->ecx = uNewEcx;
2783 pCtx->edx = uNewEdx;
2784 pCtx->ebx = uNewEbx;
2785 pCtx->esp = uNewEsp;
2786 pCtx->ebp = uNewEbp;
2787 pCtx->esi = uNewEsi;
2788 pCtx->edi = uNewEdi;
2789
2790 uNewEflags &= X86_EFL_LIVE_MASK;
2791 uNewEflags |= X86_EFL_RA1_MASK;
2792 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2793
2794 /*
2795 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2796 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2797 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2798 */
2799 pCtx->es.Sel = uNewES;
2800 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2801 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2802
2803 pCtx->cs.Sel = uNewCS;
2804 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2805 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2806
2807 pCtx->ss.Sel = uNewSS;
2808 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2809 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2810
2811 pCtx->ds.Sel = uNewDS;
2812 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2813 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2814
2815 pCtx->fs.Sel = uNewFS;
2816 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2817 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2818
2819 pCtx->gs.Sel = uNewGS;
2820 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2821 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2822 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2823
2824 pCtx->ldtr.Sel = uNewLdt;
2825 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2826 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2827 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2828
2829 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2830 {
2831 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2832 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2833 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2834 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2835 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2836 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2837 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2838 }
2839
2840 /*
2841 * Switch CR3 for the new task.
2842 */
2843 if ( fIsNewTSS386
2844 && (pCtx->cr0 & X86_CR0_PG))
2845 {
2846 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2847 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2848 {
2849 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2850 AssertRCSuccessReturn(rc, rc);
2851 }
2852 else
2853 pCtx->cr3 = uNewCr3;
2854
2855 /* Inform PGM. */
2856 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2857 {
2858 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2859 AssertRCReturn(rc, rc);
2860 /* ignore informational status codes */
2861 }
2862 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2863 }
2864
2865 /*
2866 * Switch LDTR for the new task.
2867 */
2868 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2869 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2870 else
2871 {
2872 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2873
2874 IEMSELDESC DescNewLdt;
2875 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2876 if (rcStrict != VINF_SUCCESS)
2877 {
2878 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2879 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2880 return rcStrict;
2881 }
2882 if ( !DescNewLdt.Legacy.Gen.u1Present
2883 || DescNewLdt.Legacy.Gen.u1DescType
2884 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2885 {
2886 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2887 uNewLdt, DescNewLdt.Legacy.u));
2888 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2889 }
2890
2891 pCtx->ldtr.ValidSel = uNewLdt;
2892 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2893 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2894 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2895 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2896 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2897 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2898 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2899 }
2900
2901 IEMSELDESC DescSS;
2902 if (IEM_IS_V86_MODE(pIemCpu))
2903 {
2904 pIemCpu->uCpl = 3;
2905 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2906 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2907 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2908 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2909 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2910 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2911 }
2912 else
2913 {
2914 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2915
2916 /*
2917 * Load the stack segment for the new task.
2918 */
2919 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2920 {
2921 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2922 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2923 }
2924
2925 /* Fetch the descriptor. */
2926 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2927 if (rcStrict != VINF_SUCCESS)
2928 {
2929 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2930 VBOXSTRICTRC_VAL(rcStrict)));
2931 return rcStrict;
2932 }
2933
2934 /* SS must be a data segment and writable. */
2935 if ( !DescSS.Legacy.Gen.u1DescType
2936 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2937 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2938 {
2939 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2940 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2941 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2942 }
2943
2944 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2945 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2946 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2947 {
2948 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2949 uNewCpl));
2950 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2951 }
2952
2953 /* Is it there? */
2954 if (!DescSS.Legacy.Gen.u1Present)
2955 {
2956 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2957 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2958 }
2959
2960 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2961 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2962
2963 /* Set the accessed bit before committing the result into SS. */
2964 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2965 {
2966 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2967 if (rcStrict != VINF_SUCCESS)
2968 return rcStrict;
2969 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2970 }
2971
2972 /* Commit SS. */
2973 pCtx->ss.Sel = uNewSS;
2974 pCtx->ss.ValidSel = uNewSS;
2975 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2976 pCtx->ss.u32Limit = cbLimit;
2977 pCtx->ss.u64Base = u64Base;
2978 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
2980
2981 /* CPL has changed, update IEM before loading rest of segments. */
2982 pIemCpu->uCpl = uNewCpl;
2983
2984 /*
2985 * Load the data segments for the new task.
2986 */
2987 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
2988 if (rcStrict != VINF_SUCCESS)
2989 return rcStrict;
2990 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
2991 if (rcStrict != VINF_SUCCESS)
2992 return rcStrict;
2993 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
2994 if (rcStrict != VINF_SUCCESS)
2995 return rcStrict;
2996 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
2997 if (rcStrict != VINF_SUCCESS)
2998 return rcStrict;
2999
3000 /*
3001 * Load the code segment for the new task.
3002 */
3003 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3004 {
3005 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3006 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3007 }
3008
3009 /* Fetch the descriptor. */
3010 IEMSELDESC DescCS;
3011 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3012 if (rcStrict != VINF_SUCCESS)
3013 {
3014 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3015 return rcStrict;
3016 }
3017
3018 /* CS must be a code segment. */
3019 if ( !DescCS.Legacy.Gen.u1DescType
3020 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3021 {
3022 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3023 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3024 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3025 }
3026
3027 /* For conforming CS, DPL must be less than or equal to the RPL. */
3028 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3029 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3030 {
3031            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3032 DescCS.Legacy.Gen.u2Dpl));
3033 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3034 }
3035
3036 /* For non-conforming CS, DPL must match RPL. */
3037 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3038 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3039 {
3040            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3041 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3042 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3043 }
3044
3045 /* Is it there? */
3046 if (!DescCS.Legacy.Gen.u1Present)
3047 {
3048 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3049 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3050 }
3051
3052 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3053 u64Base = X86DESC_BASE(&DescCS.Legacy);
3054
3055 /* Set the accessed bit before committing the result into CS. */
3056 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3057 {
3058 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3059 if (rcStrict != VINF_SUCCESS)
3060 return rcStrict;
3061 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3062 }
3063
3064 /* Commit CS. */
3065 pCtx->cs.Sel = uNewCS;
3066 pCtx->cs.ValidSel = uNewCS;
3067 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3068 pCtx->cs.u32Limit = cbLimit;
3069 pCtx->cs.u64Base = u64Base;
3070 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3072 }
3073
3074 /** @todo Debug trap. */
3075 if (fIsNewTSS386 && fNewDebugTrap)
3076 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3077
3078 /*
3079 * Construct the error code masks based on what caused this task switch.
3080 * See Intel Instruction reference for INT.
3081 */
3082 uint16_t uExt;
3083 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3084 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3085 {
3086 uExt = 1;
3087 }
3088 else
3089 uExt = 0;
3090
3091 /*
3092 * Push any error code on to the new stack.
3093 */
3094 if (fFlags & IEM_XCPT_FLAGS_ERR)
3095 {
3096 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3097 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3098 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3099
3100 /* Check that there is sufficient space on the stack. */
3101 /** @todo Factor out segment limit checking for normal/expand down segments
3102 * into a separate function. */
3103 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3104 {
3105 if ( pCtx->esp - 1 > cbLimitSS
3106 || pCtx->esp < cbStackFrame)
3107 {
3108 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3109 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3110 cbStackFrame));
3111 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3112 }
3113 }
3114 else
3115 {
3116 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3117 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3118 {
3119 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3120 cbStackFrame));
3121 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3122 }
3123 }
3124
3125
3126 if (fIsNewTSS386)
3127 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3128 else
3129 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3130 if (rcStrict != VINF_SUCCESS)
3131 {
3132 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3133 VBOXSTRICTRC_VAL(rcStrict)));
3134 return rcStrict;
3135 }
3136 }
3137
3138 /* Check the new EIP against the new CS limit. */
3139 if (pCtx->eip > pCtx->cs.u32Limit)
3140 {
3141 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3142 pCtx->eip, pCtx->cs.u32Limit));
3143 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3144 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3145 }
3146
3147 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3148 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3149}
3150
3151
3152/**
3153 * Implements exceptions and interrupts for protected mode.
3154 *
3155 * @returns VBox strict status code.
3156 * @param pIemCpu The IEM per CPU instance data.
3157 * @param pCtx The CPU context.
3158 * @param cbInstr The number of bytes to offset rIP by in the return
3159 * address.
3160 * @param u8Vector The interrupt / exception vector number.
3161 * @param fFlags The flags.
3162 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3163 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3164 */
3165static VBOXSTRICTRC
3166iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3167 PCPUMCTX pCtx,
3168 uint8_t cbInstr,
3169 uint8_t u8Vector,
3170 uint32_t fFlags,
3171 uint16_t uErr,
3172 uint64_t uCr2)
3173{
3174 /*
3175 * Read the IDT entry.
3176 */
3177 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3178 {
3179 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3180 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3181 }
3182 X86DESC Idte;
3183 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3184 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3185 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3186 return rcStrict;
3187 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3188 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3189 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3190
3191 /*
3192 * Check the descriptor type, DPL and such.
3193 * ASSUMES this is done in the same order as described for call-gate calls.
3194 */
3195 if (Idte.Gate.u1DescType)
3196 {
3197 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3198 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3199 }
3200 bool fTaskGate = false;
3201 uint8_t f32BitGate = true;
3202 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3203 switch (Idte.Gate.u4Type)
3204 {
3205 case X86_SEL_TYPE_SYS_UNDEFINED:
3206 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3207 case X86_SEL_TYPE_SYS_LDT:
3208 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3209 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3210 case X86_SEL_TYPE_SYS_UNDEFINED2:
3211 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3212 case X86_SEL_TYPE_SYS_UNDEFINED3:
3213 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3214 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3215 case X86_SEL_TYPE_SYS_UNDEFINED4:
3216 {
3217 /** @todo check what actually happens when the type is wrong...
3218 * esp. call gates. */
3219 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3220 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3221 }
3222
3223 case X86_SEL_TYPE_SYS_286_INT_GATE:
3224 f32BitGate = false;
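            /* fall thru */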
3225 case X86_SEL_TYPE_SYS_386_INT_GATE:
3226 fEflToClear |= X86_EFL_IF;
3227 break;
3228
3229 case X86_SEL_TYPE_SYS_TASK_GATE:
3230 fTaskGate = true;
3231#ifndef IEM_IMPLEMENTS_TASKSWITCH
3232 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3233#endif
3234 break;
3235
3236 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3237 f32BitGate = false;
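            /* fall thru */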
3238 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3239 break;
3240
3241 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3242 }
3243
3244 /* Check DPL against CPL if applicable. */
3245 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3246 {
3247 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3248 {
3249 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3250 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3251 }
3252 }
3253
3254 /* Is it there? */
3255 if (!Idte.Gate.u1Present)
3256 {
3257 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3258 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3259 }
3260
3261 /* Is it a task-gate? */
3262 if (fTaskGate)
3263 {
3264 /*
3265 * Construct the error code masks based on what caused this task switch.
3266 * See Intel Instruction reference for INT.
3267 */
3268 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3269 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3270 RTSEL SelTSS = Idte.Gate.u16Sel;
3271
3272 /*
3273 * Fetch the TSS descriptor in the GDT.
3274 */
3275 IEMSELDESC DescTSS;
3276 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3277 if (rcStrict != VINF_SUCCESS)
3278 {
3279 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3280 VBOXSTRICTRC_VAL(rcStrict)));
3281 return rcStrict;
3282 }
3283
3284 /* The TSS descriptor must be a system segment and be available (not busy). */
3285 if ( DescTSS.Legacy.Gen.u1DescType
3286 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3287 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3288 {
3289 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3290 u8Vector, SelTSS, DescTSS.Legacy.au64));
3291 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3292 }
3293
3294 /* The TSS must be present. */
3295 if (!DescTSS.Legacy.Gen.u1Present)
3296 {
3297 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3298 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3299 }
3300
3301 /* Do the actual task switch. */
3302 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3303 }
3304
3305 /* A null CS is bad. */
3306 RTSEL NewCS = Idte.Gate.u16Sel;
3307 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3308 {
3309 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3310 return iemRaiseGeneralProtectionFault0(pIemCpu);
3311 }
3312
3313 /* Fetch the descriptor for the new CS. */
3314 IEMSELDESC DescCS;
3315 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3316 if (rcStrict != VINF_SUCCESS)
3317 {
3318 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3319 return rcStrict;
3320 }
3321
3322 /* Must be a code segment. */
3323 if (!DescCS.Legacy.Gen.u1DescType)
3324 {
3325 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3326 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3327 }
3328 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3329 {
3330 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3331 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3332 }
3333
3334 /* Don't allow lowering the privilege level. */
3335 /** @todo Does the lowering of privileges apply to software interrupts
3336 * only? This has a bearing on the more-privileged or
3337 * same-privilege stack behavior further down. A testcase would
3338 * be nice. */
3339 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3340 {
3341 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3342 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3343 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3344 }
3345
3346 /* Make sure the selector is present. */
3347 if (!DescCS.Legacy.Gen.u1Present)
3348 {
3349 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3350 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3351 }
3352
3353 /* Check the new EIP against the new CS limit. */
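    /* 286 gates only carry a 16-bit offset; 386 gates supply the full 32 bits. */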
3354 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3355 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3356 ? Idte.Gate.u16OffsetLow
3357 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3358 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3359 if (uNewEip > cbLimitCS)
3360 {
3361 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3362 u8Vector, uNewEip, cbLimitCS, NewCS));
3363 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3364 }
3365
3366 /* Calc the flag image to push. */
3367 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3368 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3369 fEfl &= ~X86_EFL_RF;
3370 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3371 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3372
3373 /* From V8086 mode only go to CPL 0. */
3374 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3375 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3376 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3377 {
3378 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3379 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3380 }
3381
3382 /*
3383 * If the privilege level changes, we need to get a new stack from the TSS.
3384 * This in turns means validating the new SS and ESP...
3385 */
3386 if (uNewCpl != pIemCpu->uCpl)
3387 {
3388 RTSEL NewSS;
3389 uint32_t uNewEsp;
3390 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3391 if (rcStrict != VINF_SUCCESS)
3392 return rcStrict;
3393
3394 IEMSELDESC DescSS;
3395 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3396 if (rcStrict != VINF_SUCCESS)
3397 return rcStrict;
3398
3399 /* Check that there is sufficient space for the stack frame. */
3400 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
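        /* Frame: [error code,] EIP, CS, EFLAGS, ESP, SS - plus ES, DS, FS, GS when interrupting V86 code;
           entries are words for a 16-bit gate and dwords for a 32-bit one. */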
3401 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3402 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3403 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3404
3405 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3406 {
3407 if ( uNewEsp - 1 > cbLimitSS
3408 || uNewEsp < cbStackFrame)
3409 {
3410 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3411 u8Vector, NewSS, uNewEsp, cbStackFrame));
3412 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3413 }
3414 }
3415 else
3416 {
3417 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3418 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3419 {
3420 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3421 u8Vector, NewSS, uNewEsp, cbStackFrame));
3422 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3423 }
3424 }
3425
3426 /*
3427 * Start making changes.
3428 */
3429
3430 /* Create the stack frame. */
3431 RTPTRUNION uStackFrame;
3432 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3433 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3434 if (rcStrict != VINF_SUCCESS)
3435 return rcStrict;
3436 void * const pvStackFrame = uStackFrame.pv;
3437 if (f32BitGate)
3438 {
3439 if (fFlags & IEM_XCPT_FLAGS_ERR)
3440 *uStackFrame.pu32++ = uErr;
3441 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3442 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3443 uStackFrame.pu32[2] = fEfl;
3444 uStackFrame.pu32[3] = pCtx->esp;
3445 uStackFrame.pu32[4] = pCtx->ss.Sel;
3446 if (fEfl & X86_EFL_VM)
3447 {
3448 uStackFrame.pu32[1] = pCtx->cs.Sel;
3449 uStackFrame.pu32[5] = pCtx->es.Sel;
3450 uStackFrame.pu32[6] = pCtx->ds.Sel;
3451 uStackFrame.pu32[7] = pCtx->fs.Sel;
3452 uStackFrame.pu32[8] = pCtx->gs.Sel;
3453 }
3454 }
3455 else
3456 {
3457 if (fFlags & IEM_XCPT_FLAGS_ERR)
3458 *uStackFrame.pu16++ = uErr;
3459 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3460 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3461 uStackFrame.pu16[2] = fEfl;
3462 uStackFrame.pu16[3] = pCtx->sp;
3463 uStackFrame.pu16[4] = pCtx->ss.Sel;
3464 if (fEfl & X86_EFL_VM)
3465 {
3466 uStackFrame.pu16[1] = pCtx->cs.Sel;
3467 uStackFrame.pu16[5] = pCtx->es.Sel;
3468 uStackFrame.pu16[6] = pCtx->ds.Sel;
3469 uStackFrame.pu16[7] = pCtx->fs.Sel;
3470 uStackFrame.pu16[8] = pCtx->gs.Sel;
3471 }
3472 }
3473 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3474 if (rcStrict != VINF_SUCCESS)
3475 return rcStrict;
3476
3477 /* Mark the selectors 'accessed' (hope this is the correct time). */
3478 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3479 * after pushing the stack frame? (Write protect the gdt + stack to
3480 * find out.) */
3481 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3482 {
3483 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3484 if (rcStrict != VINF_SUCCESS)
3485 return rcStrict;
3486 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3487 }
3488
3489 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3490 {
3491 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3492 if (rcStrict != VINF_SUCCESS)
3493 return rcStrict;
3494 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3495 }
3496
3497 /*
3498 * Start committing the register changes (joins with the DPL=CPL branch).
3499 */
3500 pCtx->ss.Sel = NewSS;
3501 pCtx->ss.ValidSel = NewSS;
3502 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3503 pCtx->ss.u32Limit = cbLimitSS;
3504 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3505 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3506 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3507 pIemCpu->uCpl = uNewCpl;
3508
3509 if (fEfl & X86_EFL_VM)
3510 {
3511 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3512 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3513 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3514 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3515 }
3516 }
3517 /*
3518 * Same privilege, no stack change and smaller stack frame.
3519 */
3520 else
3521 {
3522 uint64_t uNewRsp;
3523 RTPTRUNION uStackFrame;
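            /* Frame: [error code,] EIP, CS, EFLAGS - words for a 16-bit gate, dwords for a 32-bit one. */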
3524 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3525 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3526 if (rcStrict != VINF_SUCCESS)
3527 return rcStrict;
3528 void * const pvStackFrame = uStackFrame.pv;
3529
3530 if (f32BitGate)
3531 {
3532 if (fFlags & IEM_XCPT_FLAGS_ERR)
3533 *uStackFrame.pu32++ = uErr;
3534 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3535 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3536 uStackFrame.pu32[2] = fEfl;
3537 }
3538 else
3539 {
3540 if (fFlags & IEM_XCPT_FLAGS_ERR)
3541 *uStackFrame.pu16++ = uErr;
3542 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3543 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3544 uStackFrame.pu16[2] = fEfl;
3545 }
3546 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3547 if (rcStrict != VINF_SUCCESS)
3548 return rcStrict;
3549
3550 /* Mark the CS selector as 'accessed'. */
3551 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3552 {
3553 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3554 if (rcStrict != VINF_SUCCESS)
3555 return rcStrict;
3556 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3557 }
3558
3559 /*
3560 * Start committing the register changes (joins with the other branch).
3561 */
3562 pCtx->rsp = uNewRsp;
3563 }
3564
3565 /* ... register committing continues. */
3566 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3567 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3568 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3569 pCtx->cs.u32Limit = cbLimitCS;
3570 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3571 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3572
3573 pCtx->rip = uNewEip;
3574 fEfl &= ~fEflToClear;
3575 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3576
3577 if (fFlags & IEM_XCPT_FLAGS_CR2)
3578 pCtx->cr2 = uCr2;
3579
3580 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3581 iemRaiseXcptAdjustState(pCtx, u8Vector);
3582
3583 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3584}
3585
3586
3587/**
3588 * Implements exceptions and interrupts for long mode.
3589 *
3590 * @returns VBox strict status code.
3591 * @param pIemCpu The IEM per CPU instance data.
3592 * @param pCtx The CPU context.
3593 * @param cbInstr The number of bytes to offset rIP by in the return
3594 * address.
3595 * @param u8Vector The interrupt / exception vector number.
3596 * @param fFlags The flags.
3597 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3598 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3599 */
3600static VBOXSTRICTRC
3601iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3602 PCPUMCTX pCtx,
3603 uint8_t cbInstr,
3604 uint8_t u8Vector,
3605 uint32_t fFlags,
3606 uint16_t uErr,
3607 uint64_t uCr2)
3608{
3609 /*
3610 * Read the IDT entry.
3611 */
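    /* Long mode IDT entries are 16 bytes, hence the shift by 4 below and the two 8-byte fetches. */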
3612 uint16_t offIdt = (uint16_t)u8Vector << 4;
3613 if (pCtx->idtr.cbIdt < offIdt + 7)
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3616 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3617 }
3618 X86DESC64 Idte;
3619 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3620 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3621 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3622 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3623 return rcStrict;
3624 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3625 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3626 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3627
3628 /*
3629 * Check the descriptor type, DPL and such.
3630 * ASSUMES this is done in the same order as described for call-gate calls.
3631 */
3632 if (Idte.Gate.u1DescType)
3633 {
3634 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3635 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3636 }
3637 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3638 switch (Idte.Gate.u4Type)
3639 {
3640 case AMD64_SEL_TYPE_SYS_INT_GATE:
3641 fEflToClear |= X86_EFL_IF;
3642 break;
3643 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3644 break;
3645
3646 default:
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3648 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3649 }
3650
3651 /* Check DPL against CPL if applicable. */
3652 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3653 {
3654 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3655 {
3656 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3657 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3658 }
3659 }
3660
3661 /* Is it there? */
3662 if (!Idte.Gate.u1Present)
3663 {
3664 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3665 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3666 }
3667
3668 /* A null CS is bad. */
3669 RTSEL NewCS = Idte.Gate.u16Sel;
3670 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3671 {
3672 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3673 return iemRaiseGeneralProtectionFault0(pIemCpu);
3674 }
3675
3676 /* Fetch the descriptor for the new CS. */
3677 IEMSELDESC DescCS;
3678 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3679 if (rcStrict != VINF_SUCCESS)
3680 {
3681 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3682 return rcStrict;
3683 }
3684
3685 /* Must be a 64-bit code segment. */
3686 if (!DescCS.Long.Gen.u1DescType)
3687 {
3688 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3689 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3690 }
3691 if ( !DescCS.Long.Gen.u1Long
3692 || DescCS.Long.Gen.u1DefBig
3693 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3694 {
3695 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3696 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3697 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3698 }
3699
3700 /* Don't allow lowering the privilege level. For non-conforming CS
3701 selectors, the CS.DPL sets the privilege level the trap/interrupt
3702 handler runs at. For conforming CS selectors, the CPL remains
3703 unchanged, but the CS.DPL must be <= CPL. */
3704 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3705 * when CPU in Ring-0. Result \#GP? */
3706 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3707 {
3708 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3709 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3710 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3711 }
3712
3713
3714 /* Make sure the selector is present. */
3715 if (!DescCS.Legacy.Gen.u1Present)
3716 {
3717 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3718 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3719 }
3720
3721 /* Check that the new RIP is canonical. */
3722 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3723 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3724 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3725 if (!IEM_IS_CANONICAL(uNewRip))
3726 {
3727 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3728 return iemRaiseGeneralProtectionFault0(pIemCpu);
3729 }
3730
3731 /*
3732 * If the privilege level changes or if the IST isn't zero, we need to get
3733 * a new stack from the TSS.
3734 */
3735 uint64_t uNewRsp;
3736 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3737 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3738 if ( uNewCpl != pIemCpu->uCpl
3739 || Idte.Gate.u3IST != 0)
3740 {
3741 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3742 if (rcStrict != VINF_SUCCESS)
3743 return rcStrict;
3744 }
3745 else
3746 uNewRsp = pCtx->rsp;
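    /* In 64-bit mode the CPU aligns the stack on a 16-byte boundary before pushing the frame. */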
3747 uNewRsp &= ~(uint64_t)0xf;
3748
3749 /*
3750 * Calc the flag image to push.
3751 */
3752 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3753 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3754 fEfl &= ~X86_EFL_RF;
3755 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3756 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3757
3758 /*
3759 * Start making changes.
3760 */
3761
3762 /* Create the stack frame. */
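    /* RIP, CS, RFLAGS, RSP and SS are always pushed as qwords, plus the error code when applicable. */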
3763 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3764 RTPTRUNION uStackFrame;
3765 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3766 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3767 if (rcStrict != VINF_SUCCESS)
3768 return rcStrict;
3769 void * const pvStackFrame = uStackFrame.pv;
3770
3771 if (fFlags & IEM_XCPT_FLAGS_ERR)
3772 *uStackFrame.pu64++ = uErr;
3773 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3774 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3775 uStackFrame.pu64[2] = fEfl;
3776 uStackFrame.pu64[3] = pCtx->rsp;
3777 uStackFrame.pu64[4] = pCtx->ss.Sel;
3778 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3779 if (rcStrict != VINF_SUCCESS)
3780 return rcStrict;
3781
3782 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3783 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3784 * after pushing the stack frame? (Write protect the gdt + stack to
3785 * find out.) */
3786 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3787 {
3788 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3789 if (rcStrict != VINF_SUCCESS)
3790 return rcStrict;
3791 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3792 }
3793
3794 /*
3795 * Start committing the register changes.
3796 */
3797 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3798 * hidden registers when interrupting 32-bit or 16-bit code! */
3799 if (uNewCpl != pIemCpu->uCpl)
3800 {
3801 pCtx->ss.Sel = 0 | uNewCpl;
3802 pCtx->ss.ValidSel = 0 | uNewCpl;
3803 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3804 pCtx->ss.u32Limit = UINT32_MAX;
3805 pCtx->ss.u64Base = 0;
3806 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3807 }
3808 pCtx->rsp = uNewRsp - cbStackFrame;
3809 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3810 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3811 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3812 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3813 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3814 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3815 pCtx->rip = uNewRip;
3816 pIemCpu->uCpl = uNewCpl;
3817
3818 fEfl &= ~fEflToClear;
3819 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3820
3821 if (fFlags & IEM_XCPT_FLAGS_CR2)
3822 pCtx->cr2 = uCr2;
3823
3824 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3825 iemRaiseXcptAdjustState(pCtx, u8Vector);
3826
3827 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3828}
3829
3830
3831/**
3832 * Implements exceptions and interrupts.
3833 *
3834 * All exceptions and interrupts go through this function!
3835 *
3836 * @returns VBox strict status code.
3837 * @param pIemCpu The IEM per CPU instance data.
3838 * @param cbInstr The number of bytes to offset rIP by in the return
3839 * address.
3840 * @param u8Vector The interrupt / exception vector number.
3841 * @param fFlags The flags.
3842 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3843 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3844 */
3845DECL_NO_INLINE(static, VBOXSTRICTRC)
3846iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3847 uint8_t cbInstr,
3848 uint8_t u8Vector,
3849 uint32_t fFlags,
3850 uint16_t uErr,
3851 uint64_t uCr2)
3852{
3853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3854#ifdef IN_RING0
3855 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3856 AssertRCReturn(rc, rc);
3857#endif
3858
3859 /*
3860 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3861 */
3862 if ( pCtx->eflags.Bits.u1VM
3863 && pCtx->eflags.Bits.u2IOPL != 3
3864 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3865 && (pCtx->cr0 & X86_CR0_PE) )
3866 {
3867 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3868 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3869 u8Vector = X86_XCPT_GP;
3870 uErr = 0;
3871 }
3872#ifdef DBGFTRACE_ENABLED
3873 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3874 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3875 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3876#endif
3877
3878 /*
3879 * Do recursion accounting.
3880 */
3881 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3882 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3883 if (pIemCpu->cXcptRecursions == 0)
3884 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3885 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3886 else
3887 {
3888 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3889 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3890
3891 /** @todo double and triple faults. */
3892 if (pIemCpu->cXcptRecursions >= 3)
3893 {
3894#ifdef DEBUG_bird
3895 AssertFailed();
3896#endif
3897 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3898 }
3899
3900 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3901 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3902 {
3903 ....
3904 } */
3905 }
3906 pIemCpu->cXcptRecursions++;
3907 pIemCpu->uCurXcpt = u8Vector;
3908 pIemCpu->fCurXcpt = fFlags;
3909
3910 /*
3911 * Extensive logging.
3912 */
3913#if defined(LOG_ENABLED) && defined(IN_RING3)
3914 if (LogIs3Enabled())
3915 {
3916 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3917 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3918 char szRegs[4096];
3919 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3920 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3921 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3922 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3923 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3924 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3925 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3926 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3927 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3928 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3929 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3930 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3931 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3932 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3933 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3934 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3935 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3936 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3937 " efer=%016VR{efer}\n"
3938 " pat=%016VR{pat}\n"
3939 " sf_mask=%016VR{sf_mask}\n"
3940 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3941 " lstar=%016VR{lstar}\n"
3942 " star=%016VR{star} cstar=%016VR{cstar}\n"
3943 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3944 );
3945
3946 char szInstr[256];
3947 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3948 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3949 szInstr, sizeof(szInstr), NULL);
3950 Log3(("%s%s\n", szRegs, szInstr));
3951 }
3952#endif /* LOG_ENABLED */
3953
3954 /*
3955 * Call the mode specific worker function.
3956 */
3957 VBOXSTRICTRC rcStrict;
3958 if (!(pCtx->cr0 & X86_CR0_PE))
3959 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3960 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
3961 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3962 else
3963 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3964
3965 /*
3966 * Unwind.
3967 */
3968 pIemCpu->cXcptRecursions--;
3969 pIemCpu->uCurXcpt = uPrevXcpt;
3970 pIemCpu->fCurXcpt = fPrevXcpt;
3971 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
3972 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
3973 return rcStrict;
3974}
3975
3976
3977/** \#DE - 00. */
3978DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
3979{
3980 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3981}
3982
3983
3984/** \#DB - 01.
3985 * @note This automatically clears DR7.GD. */
3986DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
3987{
3988 /** @todo set/clear RF. */
3989 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
3990 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3991}
3992
3993
3994/** \#UD - 06. */
3995DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
3996{
3997 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3998}
3999
4000
4001/** \#NM - 07. */
4002DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4003{
4004 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4005}
4006
4007
4008/** \#TS(err) - 0a. */
4009DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4010{
4011 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4012}
4013
4014
4015/** \#TS(tr) - 0a. */
4016DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4017{
4018 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4019 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4020}
4021
4022
4023/** \#TS(0) - 0a. */
4024DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4025{
4026 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4027 0, 0);
4028}
4029
4030
4031/** \#TS(err) - 0a. */
4032DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4033{
4034 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4035 uSel & X86_SEL_MASK_OFF_RPL, 0);
4036}
4037
4038
4039/** \#NP(err) - 0b. */
4040DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4041{
4042 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4043}
4044
4045
4046/** \#NP(seg) - 0b. */
4047DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4048{
4049 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4050 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4051}
4052
4053
4054/** \#NP(sel) - 0b. */
4055DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4056{
4057 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4058 uSel & ~X86_SEL_RPL, 0);
4059}
4060
4061
4062/** \#SS(seg) - 0c. */
4063DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4064{
4065 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4066 uSel & ~X86_SEL_RPL, 0);
4067}
4068
4069
4070/** \#SS(err) - 0c. */
4071DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4072{
4073 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4074}
4075
4076
4077/** \#GP(n) - 0d. */
4078DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4079{
4080 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4081}
4082
4083
4084/** \#GP(0) - 0d. */
4085DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4086{
4087 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4088}
4089
4090
4091/** \#GP(sel) - 0d. */
4092DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4093{
4094 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4095 Sel & ~X86_SEL_RPL, 0);
4096}
4097
4098
4099/** \#GP(0) - 0d. */
4100DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4101{
4102 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4103}
4104
4105
4106/** \#GP(sel) - 0d. */
4107DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4108{
4109 NOREF(iSegReg); NOREF(fAccess);
4110 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4111 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4112}
4113
4114
4115/** \#GP(sel) - 0d. */
4116DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4117{
4118 NOREF(Sel);
4119 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4120}
4121
4122
4123/** \#GP(sel) - 0d. */
4124DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4125{
4126 NOREF(iSegReg); NOREF(fAccess);
4127 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4128}
4129
4130
4131/** \#PF(n) - 0e. */
4132DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4133{
4134 uint16_t uErr;
4135 switch (rc)
4136 {
4137 case VERR_PAGE_NOT_PRESENT:
4138 case VERR_PAGE_TABLE_NOT_PRESENT:
4139 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4140 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4141 uErr = 0;
4142 break;
4143
4144 default:
4145 AssertMsgFailed(("%Rrc\n", rc));
4146 case VERR_ACCESS_DENIED:
4147 uErr = X86_TRAP_PF_P;
4148 break;
4149
4150 /** @todo reserved */
4151 }
4152
4153 if (pIemCpu->uCpl == 3)
4154 uErr |= X86_TRAP_PF_US;
4155
4156 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4157 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4158 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4159 uErr |= X86_TRAP_PF_ID;
4160
4161#if 0 /* This is so much non-sense, really. Why was it done like that? */
4162 /* Note! RW access callers reporting a WRITE protection fault, will clear
4163 the READ flag before calling. So, read-modify-write accesses (RW)
4164 can safely be reported as READ faults. */
4165 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4166 uErr |= X86_TRAP_PF_RW;
4167#else
4168 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4169 {
4170 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4171 uErr |= X86_TRAP_PF_RW;
4172 }
4173#endif
4174
4175 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4176 uErr, GCPtrWhere);
4177}
4178
4179
4180/** \#MF(0) - 10. */
4181DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4182{
4183 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4184}
4185
4186
4187/** \#AC(0) - 11. */
4188DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4189{
4190 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4191}
4192
4193
4194/**
4195 * Macro for calling iemCImplRaiseDivideError().
4196 *
4197 * This enables us to add/remove arguments and force different levels of
4198 * inlining as we wish.
4199 *
4200 * @return Strict VBox status code.
4201 */
4202#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4203IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4204{
4205 NOREF(cbInstr);
4206 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4207}
4208
4209
4210/**
4211 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4212 *
4213 * This enables us to add/remove arguments and force different levels of
4214 * inlining as we wish.
4215 *
4216 * @return Strict VBox status code.
4217 */
4218#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4219IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4220{
4221 NOREF(cbInstr);
4222 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4223}
4224
4225
4226/**
4227 * Macro for calling iemCImplRaiseInvalidOpcode().
4228 *
4229 * This enables us to add/remove arguments and force different levels of
4230 * inlining as we wish.
4231 *
4232 * @return Strict VBox status code.
4233 */
4234#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4235IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4236{
4237 NOREF(cbInstr);
4238 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4239}
4240
4241
4242/** @} */
4243
4244
4245/*
4246 *
4247 * Helper routines.
4248 * Helper routines.
4249 * Helper routines.
4250 *
4251 */
4252
4253/**
4254 * Recalculates the effective operand size.
4255 *
4256 * @param pIemCpu The IEM state.
4257 */
4258static void iemRecalEffOpSize(PIEMCPU pIemCpu)
4259{
4260 switch (pIemCpu->enmCpuMode)
4261 {
4262 case IEMMODE_16BIT:
4263 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4264 break;
4265 case IEMMODE_32BIT:
4266 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4267 break;
4268 case IEMMODE_64BIT:
4269 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4270 {
4271 case 0:
4272 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4273 break;
4274 case IEM_OP_PRF_SIZE_OP:
4275 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4276 break;
4277 case IEM_OP_PRF_SIZE_REX_W:
4278 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4279 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4280 break;
4281 }
4282 break;
4283 default:
4284 AssertFailed();
4285 }
4286}
4287
4288
4289/**
4290 * Sets the default operand size to 64-bit and recalculates the effective
4291 * operand size.
4292 *
4293 * @param pIemCpu The IEM state.
4294 */
4295static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4296{
4297 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4298 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
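    /* REX.W forces 64-bit and takes precedence over the operand size prefix; only a lone 0x66 yields 16-bit. */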
4299 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4300 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4301 else
4302 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4303}
4304
4305
4306/*
4307 *
4308 * Common opcode decoders.
4309 * Common opcode decoders.
4310 * Common opcode decoders.
4311 *
4312 */
4313//#include <iprt/mem.h>
4314
4315/**
4316 * Used to add extra details about a stub case.
4317 * @param pIemCpu The IEM per CPU state.
4318 */
4319static void iemOpStubMsg2(PIEMCPU pIemCpu)
4320{
4321#if defined(LOG_ENABLED) && defined(IN_RING3)
4322 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4323 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4324 char szRegs[4096];
4325 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4326 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4327 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4328 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4329 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4330 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4331 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4332 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4333 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4334 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4335 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4336 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4337 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4338 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4339 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4340 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4341 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4342 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4343 " efer=%016VR{efer}\n"
4344 " pat=%016VR{pat}\n"
4345 " sf_mask=%016VR{sf_mask}\n"
4346 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4347 " lstar=%016VR{lstar}\n"
4348 " star=%016VR{star} cstar=%016VR{cstar}\n"
4349 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4350 );
4351
4352 char szInstr[256];
4353 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4354 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4355 szInstr, sizeof(szInstr), NULL);
4356
4357 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4358#else
4359 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4360#endif
4361}
4362
4363/**
4364 * Complains about a stub.
4365 *
4366 * Providing two versions of this macro, one for daily use and one for use when
4367 * working on IEM.
4368 */
4369#if 0
4370# define IEMOP_BITCH_ABOUT_STUB() \
4371 do { \
4372 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4373 iemOpStubMsg2(pIemCpu); \
4374 RTAssertPanic(); \
4375 } while (0)
4376#else
4377# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4378#endif
4379
4380/** Stubs an opcode. */
4381#define FNIEMOP_STUB(a_Name) \
4382 FNIEMOP_DEF(a_Name) \
4383 { \
4384 IEMOP_BITCH_ABOUT_STUB(); \
4385 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4386 } \
4387 typedef int ignore_semicolon
4388
4389/** Stubs an opcode. */
4390#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4391 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4392 { \
4393 IEMOP_BITCH_ABOUT_STUB(); \
4394 NOREF(a_Name0); \
4395 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4396 } \
4397 typedef int ignore_semicolon
4398
4399/** Stubs an opcode which currently should raise \#UD. */
4400#define FNIEMOP_UD_STUB(a_Name) \
4401 FNIEMOP_DEF(a_Name) \
4402 { \
4403 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4404 return IEMOP_RAISE_INVALID_OPCODE(); \
4405 } \
4406 typedef int ignore_semicolon
4407
4408/** Stubs an opcode which currently should raise \#UD. */
4409#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4410 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4411 { \
4412 NOREF(a_Name0); \
4413 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4414 return IEMOP_RAISE_INVALID_OPCODE(); \
4415 } \
4416 typedef int ignore_semicolon
4417
4418
4419
4420/** @name Register Access.
4421 * @{
4422 */
4423
4424/**
4425 * Gets a reference (pointer) to the specified hidden segment register.
4426 *
4427 * @returns Hidden register reference.
4428 * @param pIemCpu The per CPU data.
4429 * @param iSegReg The segment register.
4430 */
4431static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4432{
4433 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4434 PCPUMSELREG pSReg;
4435 switch (iSegReg)
4436 {
4437 case X86_SREG_ES: pSReg = &pCtx->es; break;
4438 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4439 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4440 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4441 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4442 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4443 default:
4444 AssertFailedReturn(NULL);
4445 }
4446#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4447 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4448 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4449#else
4450 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4451#endif
4452 return pSReg;
4453}
4454
4455
4456/**
4457 * Gets a reference (pointer) to the specified segment register (the selector
4458 * value).
4459 *
4460 * @returns Pointer to the selector variable.
4461 * @param pIemCpu The per CPU data.
4462 * @param iSegReg The segment register.
4463 */
4464static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4465{
4466 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4467 switch (iSegReg)
4468 {
4469 case X86_SREG_ES: return &pCtx->es.Sel;
4470 case X86_SREG_CS: return &pCtx->cs.Sel;
4471 case X86_SREG_SS: return &pCtx->ss.Sel;
4472 case X86_SREG_DS: return &pCtx->ds.Sel;
4473 case X86_SREG_FS: return &pCtx->fs.Sel;
4474 case X86_SREG_GS: return &pCtx->gs.Sel;
4475 }
4476 AssertFailedReturn(NULL);
4477}
4478
4479
4480/**
4481 * Fetches the selector value of a segment register.
4482 *
4483 * @returns The selector value.
4484 * @param pIemCpu The per CPU data.
4485 * @param iSegReg The segment register.
4486 */
4487static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4488{
4489 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4490 switch (iSegReg)
4491 {
4492 case X86_SREG_ES: return pCtx->es.Sel;
4493 case X86_SREG_CS: return pCtx->cs.Sel;
4494 case X86_SREG_SS: return pCtx->ss.Sel;
4495 case X86_SREG_DS: return pCtx->ds.Sel;
4496 case X86_SREG_FS: return pCtx->fs.Sel;
4497 case X86_SREG_GS: return pCtx->gs.Sel;
4498 }
4499 AssertFailedReturn(0xffff);
4500}
4501
4502
4503/**
4504 * Gets a reference (pointer) to the specified general register.
4505 *
4506 * @returns Register reference.
4507 * @param pIemCpu The per CPU data.
4508 * @param iReg The general register.
4509 */
4510static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4511{
4512 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4513 switch (iReg)
4514 {
4515 case X86_GREG_xAX: return &pCtx->rax;
4516 case X86_GREG_xCX: return &pCtx->rcx;
4517 case X86_GREG_xDX: return &pCtx->rdx;
4518 case X86_GREG_xBX: return &pCtx->rbx;
4519 case X86_GREG_xSP: return &pCtx->rsp;
4520 case X86_GREG_xBP: return &pCtx->rbp;
4521 case X86_GREG_xSI: return &pCtx->rsi;
4522 case X86_GREG_xDI: return &pCtx->rdi;
4523 case X86_GREG_x8: return &pCtx->r8;
4524 case X86_GREG_x9: return &pCtx->r9;
4525 case X86_GREG_x10: return &pCtx->r10;
4526 case X86_GREG_x11: return &pCtx->r11;
4527 case X86_GREG_x12: return &pCtx->r12;
4528 case X86_GREG_x13: return &pCtx->r13;
4529 case X86_GREG_x14: return &pCtx->r14;
4530 case X86_GREG_x15: return &pCtx->r15;
4531 }
4532 AssertFailedReturn(NULL);
4533}
4534
4535
4536/**
4537 * Gets a reference (pointer) to the specified 8-bit general register.
4538 *
4539 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4540 *
4541 * @returns Register reference.
4542 * @param pIemCpu The per CPU data.
4543 * @param iReg The register.
4544 */
4545static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4546{
4547 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4548 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4549
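    /* Without a REX prefix, encodings 4 thru 7 select AH, CH, DH and BH - the high byte of the first four GPRs. */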
4550 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4551 if (iReg >= 4)
4552 pu8Reg++;
4553 return pu8Reg;
4554}
4555
4556
4557/**
4558 * Fetches the value of an 8-bit general register.
4559 *
4560 * @returns The register value.
4561 * @param pIemCpu The per CPU data.
4562 * @param iReg The register.
4563 */
4564static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4565{
4566 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4567 return *pbSrc;
4568}
4569
4570
4571/**
4572 * Fetches the value of a 16-bit general register.
4573 *
4574 * @returns The register value.
4575 * @param pIemCpu The per CPU data.
4576 * @param iReg The register.
4577 */
4578static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4579{
4580 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4581}
4582
4583
4584/**
4585 * Fetches the value of a 32-bit general register.
4586 *
4587 * @returns The register value.
4588 * @param pIemCpu The per CPU data.
4589 * @param iReg The register.
4590 */
4591static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4592{
4593 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4594}
4595
4596
4597/**
4598 * Fetches the value of a 64-bit general register.
4599 *
4600 * @returns The register value.
4601 * @param pIemCpu The per CPU data.
4602 * @param iReg The register.
4603 */
4604static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4605{
4606 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4607}
4608
4609
4610/**
4611 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4612 *
4613 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4614 * segment limit.
4615 *
4616 * @param pIemCpu The per CPU data.
4617 * @param offNextInstr The offset of the next instruction.
4618 */
4619static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4620{
4621 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4622 switch (pIemCpu->enmEffOpSize)
4623 {
4624 case IEMMODE_16BIT:
4625 {
4626 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4627 if ( uNewIp > pCtx->cs.u32Limit
4628 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4629 return iemRaiseGeneralProtectionFault0(pIemCpu);
4630 pCtx->rip = uNewIp;
4631 break;
4632 }
4633
4634 case IEMMODE_32BIT:
4635 {
4636 Assert(pCtx->rip <= UINT32_MAX);
4637 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4638
4639 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4640 if (uNewEip > pCtx->cs.u32Limit)
4641 return iemRaiseGeneralProtectionFault0(pIemCpu);
4642 pCtx->rip = uNewEip;
4643 break;
4644 }
4645
4646 case IEMMODE_64BIT:
4647 {
4648 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4649
4650 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4651 if (!IEM_IS_CANONICAL(uNewRip))
4652 return iemRaiseGeneralProtectionFault0(pIemCpu);
4653 pCtx->rip = uNewRip;
4654 break;
4655 }
4656
4657 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4658 }
4659
4660 pCtx->eflags.Bits.u1RF = 0;
4661 return VINF_SUCCESS;
4662}
4663
4664
4665/**
4666 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4667 *
4668 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4669 * segment limit.
4670 *
4671 * @returns Strict VBox status code.
4672 * @param pIemCpu The per CPU data.
4673 * @param offNextInstr The offset of the next instruction.
4674 */
4675static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4676{
4677 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4678 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4679
4680 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4681 if ( uNewIp > pCtx->cs.u32Limit
4682 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4683 return iemRaiseGeneralProtectionFault0(pIemCpu);
4684 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4685 pCtx->rip = uNewIp;
4686 pCtx->eflags.Bits.u1RF = 0;
4687
4688 return VINF_SUCCESS;
4689}
4690
4691
4692/**
4693 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4694 *
4695 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4696 * segment limit.
4697 *
4698 * @returns Strict VBox status code.
4699 * @param pIemCpu The per CPU data.
4700 * @param offNextInstr The offset of the next instruction.
4701 */
4702static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4703{
4704 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4705 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4706
4707 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4708 {
4709 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4710
4711 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4712 if (uNewEip > pCtx->cs.u32Limit)
4713 return iemRaiseGeneralProtectionFault0(pIemCpu);
4714 pCtx->rip = uNewEip;
4715 }
4716 else
4717 {
4718 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4719
4720 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4721 if (!IEM_IS_CANONICAL(uNewRip))
4722 return iemRaiseGeneralProtectionFault0(pIemCpu);
4723 pCtx->rip = uNewRip;
4724 }
4725 pCtx->eflags.Bits.u1RF = 0;
4726 return VINF_SUCCESS;
4727}
4728
4729
4730/**
4731 * Performs a near jump to the specified address.
4732 *
4733 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4734 * segment limit.
4735 *
4736 * @param pIemCpu The per CPU data.
4737 * @param uNewRip The new RIP value.
4738 */
4739static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4740{
4741 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4742 switch (pIemCpu->enmEffOpSize)
4743 {
4744 case IEMMODE_16BIT:
4745 {
4746 Assert(uNewRip <= UINT16_MAX);
4747 if ( uNewRip > pCtx->cs.u32Limit
4748 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4749 return iemRaiseGeneralProtectionFault0(pIemCpu);
4750 /** @todo Test 16-bit jump in 64-bit mode. */
4751 pCtx->rip = uNewRip;
4752 break;
4753 }
4754
4755 case IEMMODE_32BIT:
4756 {
4757 Assert(uNewRip <= UINT32_MAX);
4758 Assert(pCtx->rip <= UINT32_MAX);
4759 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4760
4761 if (uNewRip > pCtx->cs.u32Limit)
4762 return iemRaiseGeneralProtectionFault0(pIemCpu);
4763 pCtx->rip = uNewRip;
4764 break;
4765 }
4766
4767 case IEMMODE_64BIT:
4768 {
4769 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4770
4771 if (!IEM_IS_CANONICAL(uNewRip))
4772 return iemRaiseGeneralProtectionFault0(pIemCpu);
4773 pCtx->rip = uNewRip;
4774 break;
4775 }
4776
4777 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4778 }
4779
4780 pCtx->eflags.Bits.u1RF = 0;
4781 return VINF_SUCCESS;
4782}
4783
4784
4785/**
4786 * Gets the address of the top of the stack.
4787 *
4788 * @param pIemCpu The per CPU data.
4789 * @param pCtx The CPU context from which SP/ESP/RSP should be
4790 * read.
4791 */
4792DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4793{
4794 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4795 return pCtx->rsp;
4796 if (pCtx->ss.Attr.n.u1DefBig)
4797 return pCtx->esp;
4798 return pCtx->sp;
4799}
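/* Note: outside 64-bit mode the stack address size is governed by SS.B (the
 default-big attribute), not by any operand or address size prefix - hence
 the u1DefBig checks here and in the RSP helpers below. */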
4800
4801
4802/**
4803 * Updates the RIP/EIP/IP to point to the next instruction.
4804 *
4805 * This function leaves the EFLAGS.RF flag alone.
4806 *
4807 * @param pIemCpu The per CPU data.
4808 * @param cbInstr The number of bytes to add.
4809 */
4810static void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4811{
4812 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4813 switch (pIemCpu->enmCpuMode)
4814 {
4815 case IEMMODE_16BIT:
4816 Assert(pCtx->rip <= UINT16_MAX);
4817 pCtx->eip += cbInstr;
4818 pCtx->eip &= UINT32_C(0xffff);
4819 break;
4820
4821 case IEMMODE_32BIT:
4822 pCtx->eip += cbInstr;
4823 Assert(pCtx->rip <= UINT32_MAX);
4824 break;
4825
4826 case IEMMODE_64BIT:
4827 pCtx->rip += cbInstr;
4828 break;
4829 default: AssertFailed();
4830 }
4831}
4832
4833
4834#if 0
4835/**
4836 * Updates the RIP/EIP/IP to point to the next instruction.
4837 *
4838 * @param pIemCpu The per CPU data.
4839 */
4840static void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4841{
4842 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4843}
4844#endif
4845
4846
4847
4848/**
4849 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4850 *
4851 * @param pIemCpu The per CPU data.
4852 * @param cbInstr The number of bytes to add.
4853 */
4854static void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4855{
4856 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4857
4858 pCtx->eflags.Bits.u1RF = 0;
4859
4860 switch (pIemCpu->enmCpuMode)
4861 {
4862 case IEMMODE_16BIT:
4863 Assert(pCtx->rip <= UINT16_MAX);
4864 pCtx->eip += cbInstr;
4865 pCtx->eip &= UINT32_C(0xffff);
4866 break;
4867
4868 case IEMMODE_32BIT:
4869 pCtx->eip += cbInstr;
4870 Assert(pCtx->rip <= UINT32_MAX);
4871 break;
4872
4873 case IEMMODE_64BIT:
4874 pCtx->rip += cbInstr;
4875 break;
4876 default: AssertFailed();
4877 }
4878}
4879
4880
4881/**
4882 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4883 *
4884 * @param pIemCpu The per CPU data.
4885 */
4886static void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4887{
4888 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4889}
4890
4891
4892/**
4893 * Adds to the stack pointer.
4894 *
4895 * @param pIemCpu The per CPU data.
4896 * @param pCtx The CPU context which SP/ESP/RSP should be
4897 * updated.
4898 * @param cbToAdd The number of bytes to add.
4899 */
4900DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4901{
4902 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4903 pCtx->rsp += cbToAdd;
4904 else if (pCtx->ss.Attr.n.u1DefBig)
4905 pCtx->esp += cbToAdd;
4906 else
4907 pCtx->sp += cbToAdd;
4908}
4909
4910
4911/**
4912 * Subtracts from the stack pointer.
4913 *
4914 * @param pIemCpu The per CPU data.
4915 * @param pCtx The CPU context which SP/ESP/RSP should be
4916 * updated.
4917 * @param cbToSub The number of bytes to subtract.
4918 */
4919DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4920{
4921 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4922 pCtx->rsp -= cbToSub;
4923 else if (pCtx->ss.Attr.n.u1DefBig)
4924 pCtx->esp -= cbToSub;
4925 else
4926 pCtx->sp -= cbToSub;
4927}
4928
4929
4930/**
4931 * Adds to the temporary stack pointer.
4932 *
4933 * @param pIemCpu The per CPU data.
4934 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4935 * @param cbToAdd The number of bytes to add.
4936 * @param pCtx Where to get the current stack mode.
4937 */
4938DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4939{
4940 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4941 pTmpRsp->u += cbToAdd;
4942 else if (pCtx->ss.Attr.n.u1DefBig)
4943 pTmpRsp->DWords.dw0 += cbToAdd;
4944 else
4945 pTmpRsp->Words.w0 += cbToAdd;
4946}
4947
4948
4949/**
4950 * Subtracts from the temporary stack pointer.
4951 *
4952 * @param pIemCpu The per CPU data.
4953 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4954 * @param cbToSub The number of bytes to subtract.
4955 * @param pCtx Where to get the current stack mode.
4956 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
4957 * expecting that.
4958 */
4959DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
4960{
4961 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4962 pTmpRsp->u -= cbToSub;
4963 else if (pCtx->ss.Attr.n.u1DefBig)
4964 pTmpRsp->DWords.dw0 -= cbToSub;
4965 else
4966 pTmpRsp->Words.w0 -= cbToSub;
4967}
4968
4969
4970/**
4971 * Calculates the effective stack address for a push of the specified size as
4972 * well as the new RSP value (upper bits may be masked).
4973 *
4974 * @returns Effective stack address for the push.
4975 * @param pIemCpu The IEM per CPU data.
4976 * @param pCtx Where to get the current stack mode.
4977 * @param cbItem The size of the stack item to push.
4978 * @param puNewRsp Where to return the new RSP value.
4979 */
4980DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
4981{
4982 RTUINT64U uTmpRsp;
4983 RTGCPTR GCPtrTop;
4984 uTmpRsp.u = pCtx->rsp;
4985
4986 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4987 GCPtrTop = uTmpRsp.u -= cbItem;
4988 else if (pCtx->ss.Attr.n.u1DefBig)
4989 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
4990 else
4991 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
4992 *puNewRsp = uTmpRsp.u;
4993 return GCPtrTop;
4994}
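/* Illustrative example (values chosen arbitrarily): in 64-bit mode with
 RSP=00007000h, pushing an 8 byte item returns GCPtrTop = *puNewRsp = 00006FF8h.
 With a 16-bit stack segment only SP is decremented, while the upper bits of
 the original RSP are preserved in the value written to puNewRsp. */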
4995
4996
4997/**
4998 * Gets the current stack pointer and calculates the value after a pop of the
4999 * specified size.
5000 *
5001 * @returns Current stack pointer.
5002 * @param pIemCpu The per CPU data.
5003 * @param pCtx Where to get the current stack mode.
5004 * @param cbItem The size of the stack item to pop.
5005 * @param puNewRsp Where to return the new RSP value.
5006 */
5007DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5008{
5009 RTUINT64U uTmpRsp;
5010 RTGCPTR GCPtrTop;
5011 uTmpRsp.u = pCtx->rsp;
5012
5013 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5014 {
5015 GCPtrTop = uTmpRsp.u;
5016 uTmpRsp.u += cbItem;
5017 }
5018 else if (pCtx->ss.Attr.n.u1DefBig)
5019 {
5020 GCPtrTop = uTmpRsp.DWords.dw0;
5021 uTmpRsp.DWords.dw0 += cbItem;
5022 }
5023 else
5024 {
5025 GCPtrTop = uTmpRsp.Words.w0;
5026 uTmpRsp.Words.w0 += cbItem;
5027 }
5028 *puNewRsp = uTmpRsp.u;
5029 return GCPtrTop;
5030}
5031
5032
5033/**
5034 * Calculates the effective stack address for a push of the specified size as
5035 * well as the new temporary RSP value (upper bits may be masked).
5036 *
5037 * @returns Effective stack address for the push.
5038 * @param pIemCpu The per CPU data.
5039 * @param pCtx Where to get the current stack mode.
5040 * @param pTmpRsp The temporary stack pointer. This is updated.
5041 * @param cbItem The size of the stack item to push.
5042 */
5043DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5044{
5045 RTGCPTR GCPtrTop;
5046
5047 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5048 GCPtrTop = pTmpRsp->u -= cbItem;
5049 else if (pCtx->ss.Attr.n.u1DefBig)
5050 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5051 else
5052 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5053 return GCPtrTop;
5054}
5055
5056
5057/**
5058 * Gets the effective stack address for a pop of the specified size and
5059 * calculates and updates the temporary RSP.
5060 *
5061 * @returns Current stack pointer.
5062 * @param pIemCpu The per CPU data.
5063 * @param pTmpRsp The temporary stack pointer. This is updated.
5064 * @param pCtx Where to get the current stack mode.
5065 * @param cbItem The size of the stack item to pop.
5066 */
5067DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5068{
5069 RTGCPTR GCPtrTop;
5070 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5071 {
5072 GCPtrTop = pTmpRsp->u;
5073 pTmpRsp->u += cbItem;
5074 }
5075 else if (pCtx->ss.Attr.n.u1DefBig)
5076 {
5077 GCPtrTop = pTmpRsp->DWords.dw0;
5078 pTmpRsp->DWords.dw0 += cbItem;
5079 }
5080 else
5081 {
5082 GCPtrTop = pTmpRsp->Words.w0;
5083 pTmpRsp->Words.w0 += cbItem;
5084 }
5085 return GCPtrTop;
5086}
5087
5088/** @} */
5089
5090
5091/** @name FPU access and helpers.
5092 *
5093 * @{
5094 */
5095
5096
5097/**
5098 * Hook for preparing to use the host FPU.
5099 *
5100 * This is necessary in ring-0 and raw-mode context.
5101 *
5102 * @param pIemCpu The IEM per CPU data.
5103 */
5104DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5105{
5106#ifdef IN_RING3
5107 NOREF(pIemCpu);
5108#else
5109/** @todo RZ: FIXME */
5110//# error "Implement me"
5111#endif
5112}
5113
5114
5115/**
5116 * Hook for preparing to use the host FPU for SSE.
5117 *
5118 * This is necessary in ring-0 and raw-mode context.
5119 *
5120 * @param pIemCpu The IEM per CPU data.
5121 */
5122DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5123{
5124 iemFpuPrepareUsage(pIemCpu);
5125}
5126
5127
5128/**
5129 * Stores a QNaN value into a FPU register.
5130 *
5131 * @param pReg Pointer to the register.
5132 */
5133DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5134{
5135 pReg->au32[0] = UINT32_C(0x00000000);
5136 pReg->au32[1] = UINT32_C(0xc0000000);
5137 pReg->au16[4] = UINT16_C(0xffff);
5138}
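/* Note: the bit pattern stored above (sign=1, exponent 7FFFh, mantissa
 C000000000000000h) is the x87 "real indefinite" QNaN, i.e. the value the FPU
 itself produces as the masked response to invalid operations. */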
5139
5140
5141/**
5142 * Updates the FOP, FPU.CS and FPUIP registers.
5143 *
5144 * @param pIemCpu The IEM per CPU data.
5145 * @param pCtx The CPU context.
5146 * @param pFpuCtx The FPU context.
5147 */
5148DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5149{
5150 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5151 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5152 /** @todo x87.CS and FPUIP need to be kept separately. */
5153 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5154 {
5155 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
5156 * are handled in real mode, based on the fnsave and fnstenv images. */
5157 pFpuCtx->CS = 0;
5158 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5159 }
5160 else
5161 {
5162 pFpuCtx->CS = pCtx->cs.Sel;
5163 pFpuCtx->FPUIP = pCtx->rip;
5164 }
5165}
5166
5167
5168/**
5169 * Updates the x87.DS and FPUDP registers.
5170 *
5171 * @param pIemCpu The IEM per CPU data.
5172 * @param pCtx The CPU context.
5173 * @param pFpuCtx The FPU context.
5174 * @param iEffSeg The effective segment register.
5175 * @param GCPtrEff The effective address relative to @a iEffSeg.
5176 */
5177DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5178{
5179 RTSEL sel;
5180 switch (iEffSeg)
5181 {
5182 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5183 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5184 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5185 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5186 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5187 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5188 default:
5189 AssertMsgFailed(("%d\n", iEffSeg));
5190 sel = pCtx->ds.Sel;
5191 }
5192 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5193 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5194 {
5195 pFpuCtx->DS = 0;
5196 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5197 }
5198 else
5199 {
5200 pFpuCtx->DS = sel;
5201 pFpuCtx->FPUDP = GCPtrEff;
5202 }
5203}
5204
5205
5206/**
5207 * Rotates the stack registers in the push direction.
5208 *
5209 * @param pFpuCtx The FPU context.
5210 * @remarks This is a complete waste of time, but fxsave stores the registers in
5211 * stack order.
5212 */
5213DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5214{
5215 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5216 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5217 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5218 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5219 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5220 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5221 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5222 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5223 pFpuCtx->aRegs[0].r80 = r80Tmp;
5224}
5225
5226
5227/**
5228 * Rotates the stack registers in the pop direction.
5229 *
5230 * @param pFpuCtx The FPU context.
5231 * @remarks This is a complete waste of time, but fxsave stores the registers in
5232 * stack order.
5233 */
5234DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5235{
5236 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5237 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5238 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5239 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5240 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5241 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5242 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5243 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5244 pFpuCtx->aRegs[7].r80 = r80Tmp;
5245}
5246
5247
5248/**
5249 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5250 * exception prevents it.
5251 *
5252 * @param pIemCpu The IEM per CPU data.
5253 * @param pResult The FPU operation result to push.
5254 * @param pFpuCtx The FPU context.
5255 */
5256static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5257{
5258 /* Update FSW and bail if there are pending exceptions afterwards. */
5259 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5260 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5261 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5262 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5263 {
5264 pFpuCtx->FSW = fFsw;
5265 return;
5266 }
5267
5268 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5269 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5270 {
5271 /* All is fine, push the actual value. */
5272 pFpuCtx->FTW |= RT_BIT(iNewTop);
5273 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5274 }
5275 else if (pFpuCtx->FCW & X86_FCW_IM)
5276 {
5277 /* Masked stack overflow, push QNaN. */
5278 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5279 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5280 }
5281 else
5282 {
5283 /* Raise stack overflow, don't push anything. */
5284 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5285 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5286 return;
5287 }
5288
5289 fFsw &= ~X86_FSW_TOP_MASK;
5290 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5291 pFpuCtx->FSW = fFsw;
5292
5293 iemFpuRotateStackPush(pFpuCtx);
5294}
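/* Note on the TOP arithmetic used above and in the helpers below: TOP is a
 3-bit field, so (TOP + 7) & X86_FSW_TOP_SMASK is simply TOP - 1 modulo 8
 (e.g. TOP=0 wraps to 7), which is the register a push will occupy. */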
5295
5296
5297/**
5298 * Stores a result in a FPU register and updates the FSW and FTW.
5299 *
5300 * @param pFpuCtx The FPU context.
5301 * @param pResult The result to store.
5302 * @param iStReg Which FPU register to store it in.
5303 */
5304static void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5305{
5306 Assert(iStReg < 8);
5307 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5308 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5309 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5310 pFpuCtx->FTW |= RT_BIT(iReg);
5311 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5312}
5313
5314
5315/**
5316 * Only updates the FPU status word (FSW) with the result of the current
5317 * instruction.
5318 *
5319 * @param pFpuCtx The FPU context.
5320 * @param u16FSW The FSW output of the current instruction.
5321 */
5322static void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5323{
5324 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5325 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5326}
5327
5328
5329/**
5330 * Pops one item off the FPU stack if no pending exception prevents it.
5331 *
5332 * @param pFpuCtx The FPU context.
5333 */
5334static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5335{
5336 /* Check pending exceptions. */
5337 uint16_t uFSW = pFpuCtx->FSW;
5338 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5339 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5340 return;
5341
5342 /* TOP--. */
5343 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5344 uFSW &= ~X86_FSW_TOP_MASK;
5345 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5346 pFpuCtx->FSW = uFSW;
5347
5348 /* Mark the previous ST0 as empty. */
5349 iOldTop >>= X86_FSW_TOP_SHIFT;
5350 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5351
5352 /* Rotate the registers. */
5353 iemFpuRotateStackPop(pFpuCtx);
5354}
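/* Note: iOldTop above still holds the shifted TOP field, and adding
 9 << X86_FSW_TOP_SHIFT under the TOP mask increments TOP by one modulo 8,
 since 9 is congruent to 1 modulo 8. */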
5355
5356
5357/**
5358 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5359 *
5360 * @param pIemCpu The IEM per CPU data.
5361 * @param pResult The FPU operation result to push.
5362 */
5363static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5364{
5365 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5366 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5367 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5368 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5369}
5370
5371
5372/**
5373 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5374 * and sets FPUDP and FPUDS.
5375 *
5376 * @param pIemCpu The IEM per CPU data.
5377 * @param pResult The FPU operation result to push.
5378 * @param iEffSeg The effective segment register.
5379 * @param GCPtrEff The effective address relative to @a iEffSeg.
5380 */
5381static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5382{
5383 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5384 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5385 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5386 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5387 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5388}
5389
5390
5391/**
5392 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5393 * unless a pending exception prevents it.
5394 *
5395 * @param pIemCpu The IEM per CPU data.
5396 * @param pResult The FPU operation result to store and push.
5397 */
5398static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5399{
5400 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5401 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5402 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5403
5404 /* Update FSW and bail if there are pending exceptions afterwards. */
5405 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5406 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5407 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5408 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5409 {
5410 pFpuCtx->FSW = fFsw;
5411 return;
5412 }
5413
5414 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5415 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5416 {
5417 /* All is fine, push the actual value. */
5418 pFpuCtx->FTW |= RT_BIT(iNewTop);
5419 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5420 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5421 }
5422 else if (pFpuCtx->FCW & X86_FCW_IM)
5423 {
5424 /* Masked stack overflow, push QNaN. */
5425 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5426 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5427 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5428 }
5429 else
5430 {
5431 /* Raise stack overflow, don't push anything. */
5432 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5433 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5434 return;
5435 }
5436
5437 fFsw &= ~X86_FSW_TOP_MASK;
5438 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5439 pFpuCtx->FSW = fFsw;
5440
5441 iemFpuRotateStackPush(pFpuCtx);
5442}
5443
5444
5445/**
5446 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5447 * FOP.
5448 *
5449 * @param pIemCpu The IEM per CPU data.
5450 * @param pResult The result to store.
5451 * @param iStReg Which FPU register to store it in.
5453 */
5454static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5455{
5456 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5457 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5458 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5459 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5460}
5461
5462
5463/**
5464 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5465 * FOP, and then pops the stack.
5466 *
5467 * @param pIemCpu The IEM per CPU data.
5468 * @param pResult The result to store.
5469 * @param iStReg Which FPU register to store it in.
5471 */
5472static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5473{
5474 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5475 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5476 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5477 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5478 iemFpuMaybePopOne(pFpuCtx);
5479}
5480
5481
5482/**
5483 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5484 * FPUDP, and FPUDS.
5485 *
5486 * @param pIemCpu The IEM per CPU data.
5487 * @param pResult The result to store.
5488 * @param iStReg Which FPU register to store it in.
5490 * @param iEffSeg The effective memory operand selector register.
5491 * @param GCPtrEff The effective memory operand offset.
5492 */
5493static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5494{
5495 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5496 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5497 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5498 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5499 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5500}
5501
5502
5503/**
5504 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5505 * FPUDP, and FPUDS, and then pops the stack.
5506 *
5507 * @param pIemCpu The IEM per CPU data.
5508 * @param pResult The result to store.
5509 * @param iStReg Which FPU register to store it in.
5511 * @param iEffSeg The effective memory operand selector register.
5512 * @param GCPtrEff The effective memory operand offset.
5513 */
5514static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5515 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5516{
5517 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5518 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5519 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5520 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5521 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5522 iemFpuMaybePopOne(pFpuCtx);
5523}
5524
5525
5526/**
5527 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5528 *
5529 * @param pIemCpu The IEM per CPU data.
5530 */
5531static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5532{
5533 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5534 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5535 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5536}
5537
5538
5539/**
5540 * Marks the specified stack register as free (for FFREE).
5541 *
5542 * @param pIemCpu The IEM per CPU data.
5543 * @param iStReg The register to free.
5544 */
5545static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5546{
5547 Assert(iStReg < 8);
5548 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5549 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5550 pFpuCtx->FTW &= ~RT_BIT(iReg);
5551}
5552
5553
5554/**
5555 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5556 *
5557 * @param pIemCpu The IEM per CPU data.
5558 */
5559static void iemFpuStackIncTop(PIEMCPU pIemCpu)
5560{
5561 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5562 uint16_t uFsw = pFpuCtx->FSW;
5563 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5564 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5565 uFsw &= ~X86_FSW_TOP_MASK;
5566 uFsw |= uTop;
5567 pFpuCtx->FSW = uFsw;
5568}
5569
5570
5571/**
5572 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5573 *
5574 * @param pIemCpu The IEM per CPU data.
5575 */
5576static void iemFpuStackDecTop(PIEMCPU pIemCpu)
5577{
5578 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5579 uint16_t uFsw = pFpuCtx->FSW;
5580 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5581 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5582 uFsw &= ~X86_FSW_TOP_MASK;
5583 uFsw |= uTop;
5584 pFpuCtx->FSW = uFsw;
5585}
5586
5587
5588/**
5589 * Updates the FSW, FOP, FPUIP, and FPUCS.
5590 *
5591 * @param pIemCpu The IEM per CPU data.
5592 * @param u16FSW The FSW from the current instruction.
5593 */
5594static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5595{
5596 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5597 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5598 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5599 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5600}
5601
5602
5603/**
5604 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5605 *
5606 * @param pIemCpu The IEM per CPU data.
5607 * @param u16FSW The FSW from the current instruction.
5608 */
5609static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5610{
5611 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5612 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5613 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5614 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5615 iemFpuMaybePopOne(pFpuCtx);
5616}
5617
5618
5619/**
5620 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5621 *
5622 * @param pIemCpu The IEM per CPU data.
5623 * @param u16FSW The FSW from the current instruction.
5624 * @param iEffSeg The effective memory operand selector register.
5625 * @param GCPtrEff The effective memory operand offset.
5626 */
5627static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5628{
5629 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5630 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5631 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5632 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5633 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5634}
5635
5636
5637/**
5638 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5639 *
5640 * @param pIemCpu The IEM per CPU data.
5641 * @param u16FSW The FSW from the current instruction.
5642 */
5643static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5644{
5645 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5646 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5647 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5648 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5649 iemFpuMaybePopOne(pFpuCtx);
5650 iemFpuMaybePopOne(pFpuCtx);
5651}
5652
5653
5654/**
5655 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5656 *
5657 * @param pIemCpu The IEM per CPU data.
5658 * @param u16FSW The FSW from the current instruction.
5659 * @param iEffSeg The effective memory operand selector register.
5660 * @param GCPtrEff The effective memory operand offset.
5661 */
5662static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5663{
5664 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5665 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5666 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5667 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5668 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5669 iemFpuMaybePopOne(pFpuCtx);
5670}
5671
5672
5673/**
5674 * Worker routine for raising an FPU stack underflow exception.
5675 *
5676 * @param pIemCpu The IEM per CPU data.
5677 * @param pFpuCtx The FPU context.
5678 * @param iStReg The stack register being accessed.
5679 */
5680static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5681{
5682 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5683 if (pFpuCtx->FCW & X86_FCW_IM)
5684 {
5685 /* Masked underflow. */
5686 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5687 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5688 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5689 if (iStReg != UINT8_MAX)
5690 {
5691 pFpuCtx->FTW |= RT_BIT(iReg);
5692 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5693 }
5694 }
5695 else
5696 {
5697 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5698 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5699 }
5700}
5701
5702
5703/**
5704 * Raises a FPU stack underflow exception.
5705 *
5706 * @param pIemCpu The IEM per CPU data.
5707 * @param iStReg The destination register that should be loaded
5708 * with QNaN if \#IS is not masked. Specify
5709 * UINT8_MAX if none (like for fcom).
5710 */
5711DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5712{
5713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5714 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5715 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5716 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5717}
5718
5719
5720DECL_NO_INLINE(static, void)
5721iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5722{
5723 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5724 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5725 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5726 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5727 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5728}
5729
5730
5731DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5732{
5733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5734 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5735 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5736 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5737 iemFpuMaybePopOne(pFpuCtx);
5738}
5739
5740
5741DECL_NO_INLINE(static, void)
5742iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5743{
5744 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5745 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5746 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5747 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5748 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5749 iemFpuMaybePopOne(pFpuCtx);
5750}
5751
5752
5753DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5754{
5755 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5756 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5757 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5758 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5759 iemFpuMaybePopOne(pFpuCtx);
5760 iemFpuMaybePopOne(pFpuCtx);
5761}
5762
5763
5764DECL_NO_INLINE(static, void)
5765iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5766{
5767 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5768 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5769 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5770
5771 if (pFpuCtx->FCW & X86_FCW_IM)
5772 {
5773 /* Masked underflow - Push QNaN. */
5774 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5775 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5776 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5777 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5778 pFpuCtx->FTW |= RT_BIT(iNewTop);
5779 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5780 iemFpuRotateStackPush(pFpuCtx);
5781 }
5782 else
5783 {
5784 /* Exception pending - don't change TOP or the register stack. */
5785 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5786 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5787 }
5788}
5789
5790
5791DECL_NO_INLINE(static, void)
5792iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5793{
5794 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5795 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5796 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5797
5798 if (pFpuCtx->FCW & X86_FCW_IM)
5799 {
5800 /* Masked underflow - Push QNaN. */
5801 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5802 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5803 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5804 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5805 pFpuCtx->FTW |= RT_BIT(iNewTop);
5806 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5807 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5808 iemFpuRotateStackPush(pFpuCtx);
5809 }
5810 else
5811 {
5812 /* Exception pending - don't change TOP or the register stack. */
5813 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5814 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5815 }
5816}
5817
5818
5819/**
5820 * Worker routine for raising an FPU stack overflow exception on a push.
5821 *
5822 * @param pFpuCtx The FPU context.
5823 */
5824static void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5825{
5826 if (pFpuCtx->FCW & X86_FCW_IM)
5827 {
5828 /* Masked overflow. */
5829 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5830 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5831 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5832 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5833 pFpuCtx->FTW |= RT_BIT(iNewTop);
5834 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5835 iemFpuRotateStackPush(pFpuCtx);
5836 }
5837 else
5838 {
5839 /* Exception pending - don't change TOP or the register stack. */
5840 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5841 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5842 }
5843}
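/* Note: with the stack fault bit (SF) set, C1 indicates the direction of the
 fault per the usual x87 convention - it is set here for overflow, whereas the
 underflow workers above leave it clear. */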
5844
5845
5846/**
5847 * Raises a FPU stack overflow exception on a push.
5848 *
5849 * @param pIemCpu The IEM per CPU data.
5850 */
5851DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5852{
5853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5854 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5855 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5856 iemFpuStackPushOverflowOnly(pFpuCtx);
5857}
5858
5859
5860/**
5861 * Raises a FPU stack overflow exception on a push with a memory operand.
5862 *
5863 * @param pIemCpu The IEM per CPU data.
5864 * @param iEffSeg The effective memory operand selector register.
5865 * @param GCPtrEff The effective memory operand offset.
5866 */
5867DECL_NO_INLINE(static, void)
5868iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5869{
5870 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5871 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5872 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5873 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5874 iemFpuStackPushOverflowOnly(pFpuCtx);
5875}
5876
5877
5878static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5879{
5880 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5881 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5882 if (pFpuCtx->FTW & RT_BIT(iReg))
5883 return VINF_SUCCESS;
5884 return VERR_NOT_FOUND;
5885}
5886
5887
5888static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5889{
5890 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5891 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5892 if (pFpuCtx->FTW & RT_BIT(iReg))
5893 {
5894 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5895 return VINF_SUCCESS;
5896 }
5897 return VERR_NOT_FOUND;
5898}
5899
5900
5901static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5902 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5903{
5904 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5905 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5906 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5907 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5908 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5909 {
5910 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5911 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5912 return VINF_SUCCESS;
5913 }
5914 return VERR_NOT_FOUND;
5915}
5916
5917
5918static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5919{
5920 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5921 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5922 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5923 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5924 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5925 {
5926 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5927 return VINF_SUCCESS;
5928 }
5929 return VERR_NOT_FOUND;
5930}
5931
5932
5933/**
5934 * Updates the FPU exception status after FCW is changed.
5935 *
5936 * @param pFpuCtx The FPU context.
5937 */
5938static void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5939{
5940 uint16_t u16Fsw = pFpuCtx->FSW;
5941 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5942 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5943 else
5944 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5945 pFpuCtx->FSW = u16Fsw;
5946}
5947
5948
5949/**
5950 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
5951 *
5952 * @returns The full FTW.
5953 * @param pFpuCtx The FPU context.
5954 */
5955static uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
5956{
5957 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
5958 uint16_t u16Ftw = 0;
5959 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5960 for (unsigned iSt = 0; iSt < 8; iSt++)
5961 {
5962 unsigned const iReg = (iSt + iTop) & 7;
5963 if (!(u8Ftw & RT_BIT(iReg)))
5964 u16Ftw |= 3 << (iReg * 2); /* empty */
5965 else
5966 {
5967 uint16_t uTag;
5968 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
5969 if (pr80Reg->s.uExponent == 0x7fff)
5970 uTag = 2; /* Exponent is all 1's => Special. */
5971 else if (pr80Reg->s.uExponent == 0x0000)
5972 {
5973 if (pr80Reg->s.u64Mantissa == 0x0000)
5974 uTag = 1; /* All bits are zero => Zero. */
5975 else
5976 uTag = 2; /* Must be special. */
5977 }
5978 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
5979 uTag = 0; /* Valid. */
5980 else
5981 uTag = 2; /* Must be special. */
5982
5983 u16Ftw |= uTag << (iReg * 2);
5984 }
5985 }
5986
5987 return u16Ftw;
5988}
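/* Illustrative example (values chosen arbitrarily): with TOP=7 and only ST(0)
 valid holding +1.0, the abbreviated FTW has just bit 7 set and the full tag
 word computed above is 3FFFh - tag 00 (valid) for register 7 and tag 11
 (empty) for registers 0 thru 6. */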
5989
5990
5991/**
5992 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
5993 *
5994 * @returns The compressed FTW.
5995 * @param u16FullFtw The full FTW to convert.
5996 */
5997static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
5998{
5999 uint8_t u8Ftw = 0;
6000 for (unsigned i = 0; i < 8; i++)
6001 {
6002 if ((u16FullFtw & 3) != 3 /*empty*/)
6003 u8Ftw |= RT_BIT(i);
6004 u16FullFtw >>= 2;
6005 }
6006
6007 return u8Ftw;
6008}
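/* Illustrative example: the full FTW 3FFFh from the example above (register 7
 valid, the rest empty) compresses to 80h - only bit 7 is set because only
 that tag pair differs from the empty encoding 11b. */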
6009
6010/** @} */
6011
6012
6013/** @name Memory access.
6014 *
6015 * @{
6016 */
6017
6018
6019/**
6020 * Updates the IEMCPU::cbWritten counter if applicable.
6021 *
6022 * @param pIemCpu The IEM per CPU data.
6023 * @param fAccess The access being accounted for.
6024 * @param cbMem The access size.
6025 */
6026DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6027{
6028 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6029 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6030 pIemCpu->cbWritten += (uint32_t)cbMem;
6031}
6032
6033
6034/**
6035 * Checks if the given segment can be written to, raising the appropriate
6036 * exception if not.
6037 *
6038 * @returns VBox strict status code.
6039 *
6040 * @param pIemCpu The IEM per CPU data.
6041 * @param pHid Pointer to the hidden register.
6042 * @param iSegReg The register number.
6043 * @param pu64BaseAddr Where to return the base address to use for the
6044 * segment. (In 64-bit code it may differ from the
6045 * base in the hidden segment.)
6046 */
6047static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6048{
6049 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6050 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6051 else
6052 {
6053 if (!pHid->Attr.n.u1Present)
6054 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6055
6056 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6057 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6058 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6059 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6060 *pu64BaseAddr = pHid->u64Base;
6061 }
6062 return VINF_SUCCESS;
6063}
6064
6065
6066/**
6067 * Checks if the given segment can be read from, raising the appropriate
6068 * exception if not.
6069 *
6070 * @returns VBox strict status code.
6071 *
6072 * @param pIemCpu The IEM per CPU data.
6073 * @param pHid Pointer to the hidden register.
6074 * @param iSegReg The register number.
6075 * @param pu64BaseAddr Where to return the base address to use for the
6076 * segment. (In 64-bit code it may differ from the
6077 * base in the hidden segment.)
6078 */
6079static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6080{
6081 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6082 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6083 else
6084 {
6085 if (!pHid->Attr.n.u1Present)
6086 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6087
6088 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6089 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6090 *pu64BaseAddr = pHid->u64Base;
6091 }
6092 return VINF_SUCCESS;
6093}
6094
6095
6096/**
6097 * Applies the segment limit, base and attributes.
6098 *
6099 * This may raise a \#GP or \#SS.
6100 *
6101 * @returns VBox strict status code.
6102 *
6103 * @param pIemCpu The IEM per CPU data.
6104 * @param fAccess The kind of access which is being performed.
6105 * @param iSegReg The index of the segment register to apply.
6106 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6107 * TSS, ++).
 * @param cbMem The size of the memory access.
6108 * @param pGCPtrMem Pointer to the guest memory address to apply
6109 * segmentation to. Input and output parameter.
6110 */
6111static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
6112 size_t cbMem, PRTGCPTR pGCPtrMem)
6113{
6114 if (iSegReg == UINT8_MAX)
6115 return VINF_SUCCESS;
6116
6117 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6118 switch (pIemCpu->enmCpuMode)
6119 {
6120 case IEMMODE_16BIT:
6121 case IEMMODE_32BIT:
6122 {
6123 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6124 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6125
6126 Assert(pSel->Attr.n.u1Present);
6127 Assert(pSel->Attr.n.u1DescType);
6128 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6129 {
6130 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6131 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6132 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6133
6134 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6135 {
6136 /** @todo CPL check. */
6137 }
6138
6139 /*
6140 * There are two kinds of data selectors, normal and expand down.
6141 */
6142 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6143 {
6144 if ( GCPtrFirst32 > pSel->u32Limit
6145 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6146 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6147 }
6148 else
6149 {
6150 /*
6151 * The upper boundary is defined by the B bit, not the G bit!
6152 */
6153 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6154 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6155 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6156 }
6157 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6158 }
6159 else
6160 {
6161
6162 /*
6163 * A code selector can usually be used to read through; writing is
6164 * only permitted in real and V8086 mode.
6165 */
6166 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6167 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6168 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6169 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6170 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6171
6172 if ( GCPtrFirst32 > pSel->u32Limit
6173 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6174 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6175
6176 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6177 {
6178 /** @todo CPL check. */
6179 }
6180
6181 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6182 }
6183 return VINF_SUCCESS;
6184 }
6185
6186 case IEMMODE_64BIT:
6187 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6188 *pGCPtrMem += pSel->u64Base;
6189 return VINF_SUCCESS;
6190
6191 default:
6192 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
6193 }
6194}
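/* Illustrative example (values chosen arbitrarily) for the expand-down case
 above: with u32Limit=0FFFh and B=0 the valid offsets are 1000h..0FFFFh, so a
 4 byte access at 0FFEh fails the lower bound check while one at 0FFFCh
 (last byte 0FFFFh) passes both checks. */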
6195
6196
6197/**
6198 * Translates a virtual address to a physical address and checks if we
6199 * can access the page as specified.
6200 *
6201 * @param pIemCpu The IEM per CPU data.
6202 * @param GCPtrMem The virtual address.
6203 * @param fAccess The intended access.
6204 * @param pGCPhysMem Where to return the physical address.
6205 */
6206static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
6207 PRTGCPHYS pGCPhysMem)
6208{
6209 /** @todo Need a different PGM interface here. We're currently using
6210 * generic / REM interfaces. This won't cut it for R0 & RC. */
6211 RTGCPHYS GCPhys;
6212 uint64_t fFlags;
6213 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6214 if (RT_FAILURE(rc))
6215 {
6216 /** @todo Check unassigned memory in unpaged mode. */
6217 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6218 *pGCPhysMem = NIL_RTGCPHYS;
6219 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6220 }
6221
6222 /* If the page is writable and does not have the no-exec bit set, all
6223 access is allowed. Otherwise we'll have to check more carefully... */
6224 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6225 {
6226 /* Write to read only memory? */
6227 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6228 && !(fFlags & X86_PTE_RW)
6229 && ( pIemCpu->uCpl != 0
6230 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6231 {
6232 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6233 *pGCPhysMem = NIL_RTGCPHYS;
6234 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6235 }
6236
6237 /* Kernel memory accessed by userland? */
6238 if ( !(fFlags & X86_PTE_US)
6239 && pIemCpu->uCpl == 3
6240 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6241 {
6242 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6243 *pGCPhysMem = NIL_RTGCPHYS;
6244 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6245 }
6246
6247 /* Executing non-executable memory? */
6248 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6249 && (fFlags & X86_PTE_PAE_NX)
6250 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6251 {
6252 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6253 *pGCPhysMem = NIL_RTGCPHYS;
6254 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6255 VERR_ACCESS_DENIED);
6256 }
6257 }
6258
6259 /*
6260 * Set the dirty / access flags.
6261 * ASSUMES this is set when the address is translated rather than on commit...
6262 */
6263 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6264 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6265 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6266 {
6267 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6268 AssertRC(rc2);
6269 }
6270
6271 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6272 *pGCPhysMem = GCPhys;
6273 return VINF_SUCCESS;
6274}
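/* Note: the early test on X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX above is a
 fast path - a page that is writable, user accessible and executable (NX clear)
 cannot fail any of the three detailed checks, so those are only evaluated for
 the remaining flag combinations. */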
6275
6276
6277
6278/**
6279 * Maps a physical page.
6280 *
6281 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6282 * @param pIemCpu The IEM per CPU data.
6283 * @param GCPhysMem The physical address.
6284 * @param fAccess The intended access.
6285 * @param ppvMem Where to return the mapping address.
6286 * @param pLock The PGM lock.
6287 */
6288static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6289{
6290#ifdef IEM_VERIFICATION_MODE_FULL
6291 /* Force the alternative path so we can ignore writes. */
6292 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6293 {
6294 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6295 {
6296 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6297 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6298 if (RT_FAILURE(rc2))
6299 pIemCpu->fProblematicMemory = true;
6300 }
6301 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6302 }
6303#endif
6304#ifdef IEM_LOG_MEMORY_WRITES
6305 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6306 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6307#endif
6308#ifdef IEM_VERIFICATION_MODE_MINIMAL
6309 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6310#endif
6311
6312 /** @todo This API may require some improving later. A private deal with PGM
6313 * regarding locking and unlocking needs to be struck. A couple of TLBs
6314 * living in PGM with publicly accessible inlined access methods
6315 * could perhaps be an even better solution. */
6316 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6317 GCPhysMem,
6318 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6319 pIemCpu->fBypassHandlers,
6320 ppvMem,
6321 pLock);
6322 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6323 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6324
6325#ifdef IEM_VERIFICATION_MODE_FULL
6326 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6327 pIemCpu->fProblematicMemory = true;
6328#endif
6329 return rc;
6330}
6331
6332
6333/**
6334 * Unmap a page previously mapped by iemMemPageMap.
6335 *
6336 * @param pIemCpu The IEM per CPU data.
6337 * @param GCPhysMem The physical address.
6338 * @param fAccess The intended access.
6339 * @param pvMem What iemMemPageMap returned.
6340 * @param pLock The PGM lock.
6341 */
6342DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6343{
6344 NOREF(pIemCpu);
6345 NOREF(GCPhysMem);
6346 NOREF(fAccess);
6347 NOREF(pvMem);
6348 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6349}
6350
6351
6352/**
6353 * Looks up a memory mapping entry.
6354 *
6355 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6356 * @param pIemCpu The IEM per CPU data.
6357 * @param pvMem The memory address.
6358 * @param fAccess The access type of the mapping to look up.
6359 */
6360DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6361{
6362 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6363 if ( pIemCpu->aMemMappings[0].pv == pvMem
6364 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6365 return 0;
6366 if ( pIemCpu->aMemMappings[1].pv == pvMem
6367 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6368 return 1;
6369 if ( pIemCpu->aMemMappings[2].pv == pvMem
6370 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6371 return 2;
6372 return VERR_NOT_FOUND;
6373}
6374
6375
6376/**
6377 * Finds a free memmap entry when using iNextMapping doesn't work.
6378 *
6379 * @returns Memory mapping index, 1024 on failure.
6380 * @param pIemCpu The IEM per CPU data.
6381 */
6382static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6383{
6384 /*
6385 * The easy case.
6386 */
6387 if (pIemCpu->cActiveMappings == 0)
6388 {
6389 pIemCpu->iNextMapping = 1;
6390 return 0;
6391 }
6392
6393 /* There should be enough mappings for all instructions. */
6394 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6395
6396 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6397 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6398 return i;
6399
6400 AssertFailedReturn(1024);
6401}
6402
6403
6404/**
6405 * Commits a bounce buffer that needs writing back and unmaps it.
6406 *
6407 * @returns Strict VBox status code.
6408 * @param pIemCpu The IEM per CPU data.
6409 * @param iMemMap The index of the buffer to commit.
6410 */
6411static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6412{
6413 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6414 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6415
6416 /*
6417 * Do the writing.
6418 */
6419 int rc;
6420#ifndef IEM_VERIFICATION_MODE_MINIMAL
6421 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6422 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6423 {
6424 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6425 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6426 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6427 if (!pIemCpu->fBypassHandlers)
6428 {
6429 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6430 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6431 pbBuf,
6432 cbFirst);
6433 if (cbSecond && rc == VINF_SUCCESS)
6434 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6435 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6436 pbBuf + cbFirst,
6437 cbSecond);
6438 }
6439 else
6440 {
6441 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6442 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6443 pbBuf,
6444 cbFirst);
6445 if (cbSecond && rc == VINF_SUCCESS)
6446 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6447 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6448 pbBuf + cbFirst,
6449 cbSecond);
6450 }
6451 if (rc != VINF_SUCCESS)
6452 {
6453 /** @todo status code handling */
6454 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6455 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
6456 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6457 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6458 }
6459 }
6460 else
6461#endif
6462 rc = VINF_SUCCESS;
6463
6464#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6465 /*
6466 * Record the write(s).
6467 */
6468 if (!pIemCpu->fNoRem)
6469 {
6470 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6471 if (pEvtRec)
6472 {
6473 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6474 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6475 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6476 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6477 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6478 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6479 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6480 }
6481 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6482 {
6483 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6484 if (pEvtRec)
6485 {
6486 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6487 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6488 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6489 memcpy(pEvtRec->u.RamWrite.ab,
6490 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6491 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6492 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6493 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6494 }
6495 }
6496 }
6497#endif
6498#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6499 if (rc == VINF_SUCCESS)
6500 {
6501 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6502 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6503 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6504 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6505 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6506 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6507
6508 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6509 g_cbIemWrote = cbWrote;
6510 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6511 }
6512#endif
6513
6514 /*
6515 * Free the mapping entry.
6516 */
6517 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6518 Assert(pIemCpu->cActiveMappings != 0);
6519 pIemCpu->cActiveMappings--;
6520 return rc;
6521}
6522
6523
6524/**
6525 * iemMemMap worker that deals with a request crossing pages.
6526 */
6527static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
6528 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6529{
6530 /*
6531 * Do the address translations.
6532 */
6533 RTGCPHYS GCPhysFirst;
6534 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6535 if (rcStrict != VINF_SUCCESS)
6536 return rcStrict;
6537
6538/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6539 * last byte. */
6540 RTGCPHYS GCPhysSecond;
6541 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6542 if (rcStrict != VINF_SUCCESS)
6543 return rcStrict;
6544 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6545
6546#ifdef IEM_VERIFICATION_MODE_FULL
6547 /*
6548 * Detect problematic memory when verifying so we can select
6549 * the right execution engine. (TLB: Redo this.)
6550 */
6551 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6552 {
6553 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysFirst,
6554 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6555 if (RT_SUCCESS(rc2))
6556 rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysSecond,
6557 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6558 if (RT_FAILURE(rc2))
6559 pIemCpu->fProblematicMemory = true;
6560 }
6561#endif
6562
6563
6564 /*
6565 * Read in the current memory content if it's a read, execute or partial
6566 * write access.
6567 */
6568 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6569 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6570 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6571
6572 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6573 {
6574 int rc;
6575 if (!pIemCpu->fBypassHandlers)
6576 {
6577 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
6578 if (rc != VINF_SUCCESS)
6579 {
6580 /** @todo status code handling */
6581 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6582 return rc;
6583 }
6584 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
6585 if (rc != VINF_SUCCESS)
6586 {
6587 /** @todo status code handling */
6588 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6589 return rc;
6590 }
6591 }
6592 else
6593 {
6594 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
6595 if (rc != VINF_SUCCESS)
6596 {
6597 /** @todo status code handling */
6598 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6599 return rc;
6600 }
6601 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6602 if (rc != VINF_SUCCESS)
6603 {
6604 /** @todo status code handling */
6605 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6606 return rc;
6607 }
6608 }
6609
6610#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6611 if ( !pIemCpu->fNoRem
6612 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6613 {
6614 /*
6615 * Record the reads.
6616 */
6617 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6618 if (pEvtRec)
6619 {
6620 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6621 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6622 pEvtRec->u.RamRead.cb = cbFirstPage;
6623 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6624 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6625 }
6626 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6627 if (pEvtRec)
6628 {
6629 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6630 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6631 pEvtRec->u.RamRead.cb = cbSecondPage;
6632 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6633 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6634 }
6635 }
6636#endif
6637 }
6638#ifdef VBOX_STRICT
6639 else
6640 memset(pbBuf, 0xcc, cbMem);
6641#endif
6642#ifdef VBOX_STRICT
6643 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6644 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6645#endif
6646
6647 /*
6648 * Commit the bounce buffer entry.
6649 */
6650 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6651 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6652 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6653 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6654 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6655 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6656 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6657 pIemCpu->iNextMapping = iMemMap + 1;
6658 pIemCpu->cActiveMappings++;
6659
6660 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6661 *ppvMem = pbBuf;
6662 return VINF_SUCCESS;
6663}
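/* Worked example (illustrative only, not part of the code path): how the split
   above falls out for a 4 byte access starting at page offset 0xffe:
       cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffe = 2
       cbSecondPage = cbMem - cbFirstPage                          = 4 - 2          = 2
   The first two bytes are read from / written to the end of the first page and the
   remaining two to the start of the page addressed by GCPhysSecond. */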
6664
6665
6666/**
6667 * iemMemMap worker that deals with iemMemPageMap failures.
6668 */
6669static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6670 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6671{
6672 /*
6673 * Filter out conditions we can handle and the ones which shouldn't happen.
6674 */
6675 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6676 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6677 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6678 {
6679 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
6680 return rcMap;
6681 }
6682 pIemCpu->cPotentialExits++;
6683
6684 /*
6685 * Read in the current memory content if it's a read, execute or partial
6686 * write access.
6687 */
6688 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6689 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6690 {
6691 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6692 memset(pbBuf, 0xff, cbMem);
6693 else
6694 {
6695 int rc;
6696 if (!pIemCpu->fBypassHandlers)
6697 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
6698 else
6699 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6700 if (rc != VINF_SUCCESS)
6701 {
6702 /** @todo status code handling */
6703 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6704 pIemCpu->fBypassHandlers ? "PGMPhysRead" : "PGMPhysSimpleReadGCPhys", GCPhysFirst, rc));
6705 return rc;
6706 }
6707 }
6708
6709#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6710 if ( !pIemCpu->fNoRem
6711 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6712 {
6713 /*
6714 * Record the read.
6715 */
6716 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6717 if (pEvtRec)
6718 {
6719 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6720 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6721 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6722 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6723 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6724 }
6725 }
6726#endif
6727 }
6728#ifdef VBOX_STRICT
6729 else
6730 memset(pbBuf, 0xcc, cbMem);
6731#endif
6732#ifdef VBOX_STRICT
6733 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6734 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6735#endif
6736
6737 /*
6738 * Commit the bounce buffer entry.
6739 */
6740 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6741 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6742 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6743 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6744 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6745 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6746 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6747 pIemCpu->iNextMapping = iMemMap + 1;
6748 pIemCpu->cActiveMappings++;
6749
6750 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6751 *ppvMem = pbBuf;
6752 return VINF_SUCCESS;
6753}
6754
6755
6756
6757/**
6758 * Maps the specified guest memory for the given kind of access.
6759 *
6760 * This may be using bounce buffering of the memory if it's crossing a page
6761 * boundary or if there is an access handler installed for any of it. Because
6762 * of lock prefix guarantees, we're in for some extra clutter when this
6763 * happens.
6764 *
6765 * This may raise a \#GP, \#SS, \#PF or \#AC.
6766 *
6767 * @returns VBox strict status code.
6768 *
6769 * @param pIemCpu The IEM per CPU data.
6770 * @param ppvMem Where to return the pointer to the mapped
6771 * memory.
6772 * @param cbMem The number of bytes to map. This is usually 1,
6773 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6774 * string operations it can be up to a page.
6775 * @param iSegReg The index of the segment register to use for
6776 * this access. The base and limits are checked.
6777 * Use UINT8_MAX to indicate that no segmentation
6778 * is required (for IDT, GDT and LDT accesses).
6779 * @param GCPtrMem The address of the guest memory.
6780 * @param fAccess How the memory is being accessed. The
6781 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6782 * how to map the memory, while the
6783 * IEM_ACCESS_WHAT_XXX bit is used when raising
6784 * exceptions.
6785 */
6786static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6787{
6788 /*
6789 * Check the input and figure out which mapping entry to use.
6790 */
6791 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6792 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6793
6794 unsigned iMemMap = pIemCpu->iNextMapping;
6795 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6796 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6797 {
6798 iMemMap = iemMemMapFindFree(pIemCpu);
6799 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
6800 }
6801
6802 /*
6803 * Map the memory, checking that we can actually access it. If something
6804 * slightly complicated happens, fall back on bounce buffering.
6805 */
6806 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6807 if (rcStrict != VINF_SUCCESS)
6808 return rcStrict;
6809
6810 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6811 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6812
6813 RTGCPHYS GCPhysFirst;
6814 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6815 if (rcStrict != VINF_SUCCESS)
6816 return rcStrict;
6817
6818 void *pvMem;
6819 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6820 if (rcStrict != VINF_SUCCESS)
6821 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6822
6823 /*
6824 * Fill in the mapping table entry.
6825 */
6826 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
6827 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
6828 pIemCpu->iNextMapping = iMemMap + 1;
6829 pIemCpu->cActiveMappings++;
6830
6831 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6832 *ppvMem = pvMem;
6833 return VINF_SUCCESS;
6834}
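/* Typical caller pattern (sketch; it mirrors the fetch/store helpers further down
   in this file, so nothing here is new API):
       uint16_t *pu16Dst;
       VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
                                         iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
       if (rcStrict == VINF_SUCCESS)
       {
           *pu16Dst = u16Value;
           rcStrict = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
       }
   The returned pointer may address guest RAM directly or one of the bounce buffers;
   either way the guest memory only counts as updated once iemMemCommitAndUnmap
   returns VINF_SUCCESS. */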
6835
6836
6837/**
6838 * Commits the guest memory if bounce buffered and unmaps it.
6839 *
6840 * @returns Strict VBox status code.
6841 * @param pIemCpu The IEM per CPU data.
6842 * @param pvMem The mapping.
6843 * @param fAccess The kind of access.
6844 */
6845static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6846{
6847 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
6848 AssertReturn(iMemMap >= 0, iMemMap);
6849
6850 /* If it's bounce buffered, we may need to write back the buffer. */
6851 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6852 {
6853 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6854 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
6855 }
6856 /* Otherwise unlock it. */
6857 else
6858 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6859
6860 /* Free the entry. */
6861 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6862 Assert(pIemCpu->cActiveMappings != 0);
6863 pIemCpu->cActiveMappings--;
6864 return VINF_SUCCESS;
6865}
6866
6867
6868/**
6869 * Rolls back mappings, releasing page locks and such.
6870 *
6871 * The caller shall only call this after checking cActiveMappings.
6872 *
6874 * @param pIemCpu The IEM per CPU data.
6875 */
6876static void iemMemRollback(PIEMCPU pIemCpu)
6877{
6878 Assert(pIemCpu->cActiveMappings > 0);
6879
6880 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
6881 while (iMemMap-- > 0)
6882 {
6883 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
6884 if (fAccess != IEM_ACCESS_INVALID)
6885 {
6886 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6887 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
6888 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6889 Assert(pIemCpu->cActiveMappings > 0);
6890 pIemCpu->cActiveMappings--;
6891 }
6892 }
6893}
6894
6895
6896/**
6897 * Fetches a data byte.
6898 *
6899 * @returns Strict VBox status code.
6900 * @param pIemCpu The IEM per CPU data.
6901 * @param pu8Dst Where to return the byte.
6902 * @param iSegReg The index of the segment register to use for
6903 * this access. The base and limits are checked.
6904 * @param GCPtrMem The address of the guest memory.
6905 */
6906static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6907{
6908 /* The lazy approach for now... */
6909 uint8_t const *pu8Src;
6910 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6911 if (rc == VINF_SUCCESS)
6912 {
6913 *pu8Dst = *pu8Src;
6914 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6915 }
6916 return rc;
6917}
6918
6919
6920/**
6921 * Fetches a data word.
6922 *
6923 * @returns Strict VBox status code.
6924 * @param pIemCpu The IEM per CPU data.
6925 * @param pu16Dst Where to return the word.
6926 * @param iSegReg The index of the segment register to use for
6927 * this access. The base and limits are checked.
6928 * @param GCPtrMem The address of the guest memory.
6929 */
6930static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6931{
6932 /* The lazy approach for now... */
6933 uint16_t const *pu16Src;
6934 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6935 if (rc == VINF_SUCCESS)
6936 {
6937 *pu16Dst = *pu16Src;
6938 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6939 }
6940 return rc;
6941}
6942
6943
6944/**
6945 * Fetches a data dword.
6946 *
6947 * @returns Strict VBox status code.
6948 * @param pIemCpu The IEM per CPU data.
6949 * @param pu32Dst Where to return the dword.
6950 * @param iSegReg The index of the segment register to use for
6951 * this access. The base and limits are checked.
6952 * @param GCPtrMem The address of the guest memory.
6953 */
6954static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6955{
6956 /* The lazy approach for now... */
6957 uint32_t const *pu32Src;
6958 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6959 if (rc == VINF_SUCCESS)
6960 {
6961 *pu32Dst = *pu32Src;
6962 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6963 }
6964 return rc;
6965}
6966
6967
6968#ifdef SOME_UNUSED_FUNCTION
6969/**
6970 * Fetches a data dword and sign extends it to a qword.
6971 *
6972 * @returns Strict VBox status code.
6973 * @param pIemCpu The IEM per CPU data.
6974 * @param pu64Dst Where to return the sign extended value.
6975 * @param iSegReg The index of the segment register to use for
6976 * this access. The base and limits are checked.
6977 * @param GCPtrMem The address of the guest memory.
6978 */
6979static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6980{
6981 /* The lazy approach for now... */
6982 int32_t const *pi32Src;
6983 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6984 if (rc == VINF_SUCCESS)
6985 {
6986 *pu64Dst = *pi32Src;
6987 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6988 }
6989#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6990 else
6991 *pu64Dst = 0;
6992#endif
6993 return rc;
6994}
6995#endif
6996
6997
6998/**
6999 * Fetches a data qword.
7000 *
7001 * @returns Strict VBox status code.
7002 * @param pIemCpu The IEM per CPU data.
7003 * @param pu64Dst Where to return the qword.
7004 * @param iSegReg The index of the segment register to use for
7005 * this access. The base and limits are checked.
7006 * @param GCPtrMem The address of the guest memory.
7007 */
7008static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7009{
7010 /* The lazy approach for now... */
7011 uint64_t const *pu64Src;
7012 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7013 if (rc == VINF_SUCCESS)
7014 {
7015 *pu64Dst = *pu64Src;
7016 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7017 }
7018 return rc;
7019}
7020
7021
7022/**
7023 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7024 *
7025 * @returns Strict VBox status code.
7026 * @param pIemCpu The IEM per CPU data.
7027 * @param pu64Dst Where to return the qword.
7028 * @param iSegReg The index of the segment register to use for
7029 * this access. The base and limits are checked.
7030 * @param GCPtrMem The address of the guest memory.
7031 */
7032static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7033{
7034 /* The lazy approach for now... */
7035 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7036 if (RT_UNLIKELY(GCPtrMem & 15))
7037 return iemRaiseGeneralProtectionFault0(pIemCpu);
7038
7039 uint64_t const *pu64Src;
7040 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7041 if (rc == VINF_SUCCESS)
7042 {
7043 *pu64Dst = *pu64Src;
7044 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7045 }
7046 return rc;
7047}
7048
7049
7050/**
7051 * Fetches a data tword.
7052 *
7053 * @returns Strict VBox status code.
7054 * @param pIemCpu The IEM per CPU data.
7055 * @param pr80Dst Where to return the tword.
7056 * @param iSegReg The index of the segment register to use for
7057 * this access. The base and limits are checked.
7058 * @param GCPtrMem The address of the guest memory.
7059 */
7060static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7061{
7062 /* The lazy approach for now... */
7063 PCRTFLOAT80U pr80Src;
7064 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7065 if (rc == VINF_SUCCESS)
7066 {
7067 *pr80Dst = *pr80Src;
7068 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7069 }
7070 return rc;
7071}
7072
7073
7074/**
7075 * Fetches a data dqword (double qword), generally SSE related.
7076 *
7077 * @returns Strict VBox status code.
7078 * @param pIemCpu The IEM per CPU data.
7079 * @param pu128Dst Where to return the dqword.
7080 * @param iSegReg The index of the segment register to use for
7081 * this access. The base and limits are checked.
7082 * @param GCPtrMem The address of the guest memory.
7083 */
7084static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7085{
7086 /* The lazy approach for now... */
7087 uint128_t const *pu128Src;
7088 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7089 if (rc == VINF_SUCCESS)
7090 {
7091 *pu128Dst = *pu128Src;
7092 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7093 }
7094 return rc;
7095}
7096
7097
7098/**
7099 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7100 * related.
7101 *
7102 * Raises \#GP(0) if not aligned.
7103 *
7104 * @returns Strict VBox status code.
7105 * @param pIemCpu The IEM per CPU data.
7106 * @param pu128Dst Where to return the dqword.
7107 * @param iSegReg The index of the segment register to use for
7108 * this access. The base and limits are checked.
7109 * @param GCPtrMem The address of the guest memory.
7110 */
7111static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7112{
7113 /* The lazy approach for now... */
7114 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7115 if ( (GCPtrMem & 15)
7116 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7117 return iemRaiseGeneralProtectionFault0(pIemCpu);
7118
7119 uint128_t const *pu128Src;
7120 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7121 if (rc == VINF_SUCCESS)
7122 {
7123 *pu128Dst = *pu128Src;
7124 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7125 }
7126 return rc;
7127}
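/* Alignment note (sketch): the check above only considers the low four bits of the
   effective address, e.g. GCPtrMem = 0x1000 -> (0x1000 & 15) == 0 -> access allowed,
   while GCPtrMem = 0x1014 -> (0x1014 & 15) == 4 -> #GP(0) unless MXCSR.MM is set.
   Whether the check belongs before or after adding the segment base is still an
   open question, see the todo above. */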
7128
7129
7130
7131
7132/**
7133 * Fetches a descriptor register (lgdt, lidt).
7134 *
7135 * @returns Strict VBox status code.
7136 * @param pIemCpu The IEM per CPU data.
7137 * @param pcbLimit Where to return the limit.
7138 * @param pGCPtrBase Where to return the base.
7139 * @param iSegReg The index of the segment register to use for
7140 * this access. The base and limits are checked.
7141 * @param GCPtrMem The address of the guest memory.
7142 * @param enmOpSize The effective operand size.
7143 */
7144static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
7145 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7146{
7147 uint8_t const *pu8Src;
7148 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7149 (void **)&pu8Src,
7150 enmOpSize == IEMMODE_64BIT
7151 ? 2 + 8
7152 : enmOpSize == IEMMODE_32BIT
7153 ? 2 + 4
7154 : 2 + 3,
7155 iSegReg,
7156 GCPtrMem,
7157 IEM_ACCESS_DATA_R);
7158 if (rcStrict == VINF_SUCCESS)
7159 {
7160 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7161 switch (enmOpSize)
7162 {
7163 case IEMMODE_16BIT:
7164 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7165 break;
7166 case IEMMODE_32BIT:
7167 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7168 break;
7169 case IEMMODE_64BIT:
7170 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7171 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7172 break;
7173
7174 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7175 }
7176 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7177 }
7178 return rcStrict;
7179}
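/* Caller sketch (illustrative; the actual lgdt/lidt implementations live in the
   IEMAllCImpl code and may look different):
       uint16_t cbLimit;
       RTGCPTR  GCPtrBase;
       VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase,
                                                   iEffSeg, GCPtrEffSrc, enmEffOpSize);
       if (rcStrict == VINF_SUCCESS)
           ... load cbLimit and GCPtrBase into the guest GDTR or IDTR ...
   Note from the switch above that a 16-bit operand size only yields a 24-bit base;
   the top byte of the dword is forced to zero. */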
7180
7181
7182
7183/**
7184 * Stores a data byte.
7185 *
7186 * @returns Strict VBox status code.
7187 * @param pIemCpu The IEM per CPU data.
7188 * @param iSegReg The index of the segment register to use for
7189 * this access. The base and limits are checked.
7190 * @param GCPtrMem The address of the guest memory.
7191 * @param u8Value The value to store.
7192 */
7193static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7194{
7195 /* The lazy approach for now... */
7196 uint8_t *pu8Dst;
7197 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7198 if (rc == VINF_SUCCESS)
7199 {
7200 *pu8Dst = u8Value;
7201 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7202 }
7203 return rc;
7204}
7205
7206
7207/**
7208 * Stores a data word.
7209 *
7210 * @returns Strict VBox status code.
7211 * @param pIemCpu The IEM per CPU data.
7212 * @param iSegReg The index of the segment register to use for
7213 * this access. The base and limits are checked.
7214 * @param GCPtrMem The address of the guest memory.
7215 * @param u16Value The value to store.
7216 */
7217static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7218{
7219 /* The lazy approach for now... */
7220 uint16_t *pu16Dst;
7221 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7222 if (rc == VINF_SUCCESS)
7223 {
7224 *pu16Dst = u16Value;
7225 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7226 }
7227 return rc;
7228}
7229
7230
7231/**
7232 * Stores a data dword.
7233 *
7234 * @returns Strict VBox status code.
7235 * @param pIemCpu The IEM per CPU data.
7236 * @param iSegReg The index of the segment register to use for
7237 * this access. The base and limits are checked.
7238 * @param GCPtrMem The address of the guest memory.
7239 * @param u32Value The value to store.
7240 */
7241static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7242{
7243 /* The lazy approach for now... */
7244 uint32_t *pu32Dst;
7245 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7246 if (rc == VINF_SUCCESS)
7247 {
7248 *pu32Dst = u32Value;
7249 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7250 }
7251 return rc;
7252}
7253
7254
7255/**
7256 * Stores a data qword.
7257 *
7258 * @returns Strict VBox status code.
7259 * @param pIemCpu The IEM per CPU data.
7260 * @param iSegReg The index of the segment register to use for
7261 * this access. The base and limits are checked.
7262 * @param GCPtrMem The address of the guest memory.
7263 * @param u64Value The value to store.
7264 */
7265static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7266{
7267 /* The lazy approach for now... */
7268 uint64_t *pu64Dst;
7269 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7270 if (rc == VINF_SUCCESS)
7271 {
7272 *pu64Dst = u64Value;
7273 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7274 }
7275 return rc;
7276}
7277
7278
7279/**
7280 * Stores a data dqword.
7281 *
7282 * @returns Strict VBox status code.
7283 * @param pIemCpu The IEM per CPU data.
7284 * @param iSegReg The index of the segment register to use for
7285 * this access. The base and limits are checked.
7286 * @param GCPtrMem The address of the guest memory.
7287 * @param u128Value The value to store.
7288 */
7289static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7290{
7291 /* The lazy approach for now... */
7292 uint128_t *pu128Dst;
7293 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7294 if (rc == VINF_SUCCESS)
7295 {
7296 *pu128Dst = u128Value;
7297 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7298 }
7299 return rc;
7300}
7301
7302
7303/**
7304 * Stores a data dqword, SSE aligned.
7305 *
7306 * @returns Strict VBox status code.
7307 * @param pIemCpu The IEM per CPU data.
7308 * @param iSegReg The index of the segment register to use for
7309 * this access. The base and limits are checked.
7310 * @param GCPtrMem The address of the guest memory.
7311 * @param u128Value The value to store.
7312 */
7313static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7314{
7315 /* The lazy approach for now... */
7316 if ( (GCPtrMem & 15)
7317 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7318 return iemRaiseGeneralProtectionFault0(pIemCpu);
7319
7320 uint128_t *pu128Dst;
7321 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7322 if (rc == VINF_SUCCESS)
7323 {
7324 *pu128Dst = u128Value;
7325 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7326 }
7327 return rc;
7328}
7329
7330
7331/**
7332 * Stores a descriptor register (sgdt, sidt).
7333 *
7334 * @returns Strict VBox status code.
7335 * @param pIemCpu The IEM per CPU data.
7336 * @param cbLimit The limit.
7337 * @param GCPtrBase The base address.
7338 * @param iSegReg The index of the segment register to use for
7339 * this access. The base and limits are checked.
7340 * @param GCPtrMem The address of the guest memory.
7341 * @param enmOpSize The effective operand size.
7342 */
7343static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
7344 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7345{
7346 uint8_t *pu8Src;
7347 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7348 (void **)&pu8Src,
7349 enmOpSize == IEMMODE_64BIT
7350 ? 2 + 8
7351 : enmOpSize == IEMMODE_32BIT
7352 ? 2 + 4
7353 : 2 + 3,
7354 iSegReg,
7355 GCPtrMem,
7356 IEM_ACCESS_DATA_W);
7357 if (rcStrict == VINF_SUCCESS)
7358 {
7359 pu8Src[0] = RT_BYTE1(cbLimit);
7360 pu8Src[1] = RT_BYTE2(cbLimit);
7361 pu8Src[2] = RT_BYTE1(GCPtrBase);
7362 pu8Src[3] = RT_BYTE2(GCPtrBase);
7363 pu8Src[4] = RT_BYTE3(GCPtrBase);
7364 if (enmOpSize == IEMMODE_16BIT)
7365 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7366 else
7367 {
7368 pu8Src[5] = RT_BYTE4(GCPtrBase);
7369 if (enmOpSize == IEMMODE_64BIT)
7370 {
7371 pu8Src[6] = RT_BYTE5(GCPtrBase);
7372 pu8Src[7] = RT_BYTE6(GCPtrBase);
7373 pu8Src[8] = RT_BYTE7(GCPtrBase);
7374 pu8Src[9] = RT_BYTE8(GCPtrBase);
7375 }
7376 }
7377 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7378 }
7379 return rcStrict;
7380}
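/* Resulting operand layout (derived from the stores above):
       offset 0..1 : limit, little endian
       offset 2..5 : base bits 0..31 (byte 5 forced to zero for 16-bit operand size)
       offset 6..9 : base bits 32..63 (64-bit operand size only)
   So sgdt/sidt writes 6 bytes in 16-bit and 32-bit mode and 10 bytes in 64-bit mode. */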
7381
7382
7383/**
7384 * Pushes a word onto the stack.
7385 *
7386 * @returns Strict VBox status code.
7387 * @param pIemCpu The IEM per CPU data.
7388 * @param u16Value The value to push.
7389 */
7390static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7391{
7392 /* Decrement the stack pointer. */
7393 uint64_t uNewRsp;
7394 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7395 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7396
7397 /* Write the word the lazy way. */
7398 uint16_t *pu16Dst;
7399 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7400 if (rc == VINF_SUCCESS)
7401 {
7402 *pu16Dst = u16Value;
7403 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7404 }
7405
7406 /* Commit the new RSP value unless an access handler made trouble. */
7407 if (rc == VINF_SUCCESS)
7408 pCtx->rsp = uNewRsp;
7409
7410 return rc;
7411}
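/* Worked example (sketch): with a flat 32-bit stack and ESP=0x1000, pushing a word
   makes iemRegGetRspForPush return GCPtrTop=0xffe and uNewRsp=0xffe.  The word is
   written to SS:0xffe first; only if that write (or its bounce buffer commit)
   succeeds is pCtx->rsp updated, so a #SS/#PF raised by the store leaves RSP
   unchanged. */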
7412
7413
7414/**
7415 * Pushes a dword onto the stack.
7416 *
7417 * @returns Strict VBox status code.
7418 * @param pIemCpu The IEM per CPU data.
7419 * @param u32Value The value to push.
7420 */
7421static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7422{
7423 /* Decrement the stack pointer. */
7424 uint64_t uNewRsp;
7425 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7426 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7427
7428 /* Write the dword the lazy way. */
7429 uint32_t *pu32Dst;
7430 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7431 if (rc == VINF_SUCCESS)
7432 {
7433 *pu32Dst = u32Value;
7434 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7435 }
7436
7437 /* Commit the new RSP value unless an access handler made trouble. */
7438 if (rc == VINF_SUCCESS)
7439 pCtx->rsp = uNewRsp;
7440
7441 return rc;
7442}
7443
7444
7445/**
7446 * Pushes a dword segment register value onto the stack.
7447 *
7448 * @returns Strict VBox status code.
7449 * @param pIemCpu The IEM per CPU data.
7450 * @param u32Value The value to push.
7451 */
7452static VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7453{
7454 /* Decrement the stack pointer. */
7455 uint64_t uNewRsp;
7456 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7457 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7458
7459 VBOXSTRICTRC rc;
7460 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7461 {
7462 /* The recompiler writes a full dword. */
7463 uint32_t *pu32Dst;
7464 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7465 if (rc == VINF_SUCCESS)
7466 {
7467 *pu32Dst = u32Value;
7468 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7469 }
7470 }
7471 else
7472 {
7473 /* The Intel docs talk about zero extending the selector register
7474 value. The actual Intel CPU tested here might be zero extending the value,
7475 but it still only writes the lower word... */
7476 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7477 * happens when crossing an electric page boundary, is the high word
7478 * checked for write accessibility or not? Probably it is. What about
7479 * segment limits? */
7480 uint16_t *pu16Dst;
7481 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7482 if (rc == VINF_SUCCESS)
7483 {
7484 *pu16Dst = (uint16_t)u32Value;
7485 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7486 }
7487 }
7488
7489 /* Commit the new RSP value unless an access handler made trouble. */
7490 if (rc == VINF_SUCCESS)
7491 pCtx->rsp = uNewRsp;
7492
7493 return rc;
7494}
7495
7496
7497/**
7498 * Pushes a qword onto the stack.
7499 *
7500 * @returns Strict VBox status code.
7501 * @param pIemCpu The IEM per CPU data.
7502 * @param u64Value The value to push.
7503 */
7504static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7505{
7506 /* Decrement the stack pointer. */
7507 uint64_t uNewRsp;
7508 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7509 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7510
7511 /* Write the qword the lazy way. */
7512 uint64_t *pu64Dst;
7513 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7514 if (rc == VINF_SUCCESS)
7515 {
7516 *pu64Dst = u64Value;
7517 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7518 }
7519
7520 /* Commit the new RSP value unless an access handler made trouble. */
7521 if (rc == VINF_SUCCESS)
7522 pCtx->rsp = uNewRsp;
7523
7524 return rc;
7525}
7526
7527
7528/**
7529 * Pops a word from the stack.
7530 *
7531 * @returns Strict VBox status code.
7532 * @param pIemCpu The IEM per CPU data.
7533 * @param pu16Value Where to store the popped value.
7534 */
7535static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7536{
7537 /* Increment the stack pointer. */
7538 uint64_t uNewRsp;
7539 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7540 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7541
7542 /* Fetch the word the lazy way. */
7543 uint16_t const *pu16Src;
7544 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7545 if (rc == VINF_SUCCESS)
7546 {
7547 *pu16Value = *pu16Src;
7548 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7549
7550 /* Commit the new RSP value. */
7551 if (rc == VINF_SUCCESS)
7552 pCtx->rsp = uNewRsp;
7553 }
7554
7555 return rc;
7556}
7557
7558
7559/**
7560 * Pops a dword from the stack.
7561 *
7562 * @returns Strict VBox status code.
7563 * @param pIemCpu The IEM per CPU data.
7564 * @param pu32Value Where to store the popped value.
7565 */
7566static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7567{
7568 /* Increment the stack pointer. */
7569 uint64_t uNewRsp;
7570 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7571 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7572
7573 /* Fetch the dword the lazy way. */
7574 uint32_t const *pu32Src;
7575 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7576 if (rc == VINF_SUCCESS)
7577 {
7578 *pu32Value = *pu32Src;
7579 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7580
7581 /* Commit the new RSP value. */
7582 if (rc == VINF_SUCCESS)
7583 pCtx->rsp = uNewRsp;
7584 }
7585
7586 return rc;
7587}
7588
7589
7590/**
7591 * Pops a qword from the stack.
7592 *
7593 * @returns Strict VBox status code.
7594 * @param pIemCpu The IEM per CPU data.
7595 * @param pu64Value Where to store the popped value.
7596 */
7597static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7598{
7599 /* Increment the stack pointer. */
7600 uint64_t uNewRsp;
7601 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7602 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7603
7604 /* Fetch the qword the lazy way. */
7605 uint64_t const *pu64Src;
7606 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7607 if (rc == VINF_SUCCESS)
7608 {
7609 *pu64Value = *pu64Src;
7610 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7611
7612 /* Commit the new RSP value. */
7613 if (rc == VINF_SUCCESS)
7614 pCtx->rsp = uNewRsp;
7615 }
7616
7617 return rc;
7618}
7619
7620
7621/**
7622 * Pushes a word onto the stack, using a temporary stack pointer.
7623 *
7624 * @returns Strict VBox status code.
7625 * @param pIemCpu The IEM per CPU data.
7626 * @param u16Value The value to push.
7627 * @param pTmpRsp Pointer to the temporary stack pointer.
7628 */
7629static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7630{
7631 /* Decrement the stack pointer. */
7632 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7633 RTUINT64U NewRsp = *pTmpRsp;
7634 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7635
7636 /* Write the word the lazy way. */
7637 uint16_t *pu16Dst;
7638 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7639 if (rc == VINF_SUCCESS)
7640 {
7641 *pu16Dst = u16Value;
7642 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7643 }
7644
7645 /* Commit the new RSP value unless an access handler made trouble. */
7646 if (rc == VINF_SUCCESS)
7647 *pTmpRsp = NewRsp;
7648
7649 return rc;
7650}
7651
7652
7653/**
7654 * Pushes a dword onto the stack, using a temporary stack pointer.
7655 *
7656 * @returns Strict VBox status code.
7657 * @param pIemCpu The IEM per CPU data.
7658 * @param u32Value The value to push.
7659 * @param pTmpRsp Pointer to the temporary stack pointer.
7660 */
7661static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7662{
7663 /* Decrement the stack pointer. */
7664 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7665 RTUINT64U NewRsp = *pTmpRsp;
7666 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7667
7668 /* Write the dword the lazy way. */
7669 uint32_t *pu32Dst;
7670 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7671 if (rc == VINF_SUCCESS)
7672 {
7673 *pu32Dst = u32Value;
7674 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7675 }
7676
7677 /* Commit the new RSP value unless an access handler made trouble. */
7678 if (rc == VINF_SUCCESS)
7679 *pTmpRsp = NewRsp;
7680
7681 return rc;
7682}
7683
7684
7685/**
7686 * Pushes a qword onto the stack, using a temporary stack pointer.
7687 *
7688 * @returns Strict VBox status code.
7689 * @param pIemCpu The IEM per CPU data.
7690 * @param u64Value The value to push.
7691 * @param pTmpRsp Pointer to the temporary stack pointer.
7692 */
7693static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7694{
7695 /* Decrement the stack pointer. */
7696 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7697 RTUINT64U NewRsp = *pTmpRsp;
7698 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7699
7700 /* Write the qword the lazy way. */
7701 uint64_t *pu64Dst;
7702 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7703 if (rc == VINF_SUCCESS)
7704 {
7705 *pu64Dst = u64Value;
7706 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7707 }
7708
7709 /* Commit the new RSP value unless an access handler made trouble. */
7710 if (rc == VINF_SUCCESS)
7711 *pTmpRsp = NewRsp;
7712
7713 return rc;
7714}
7715
7716
7717/**
7718 * Pops a word from the stack, using a temporary stack pointer.
7719 *
7720 * @returns Strict VBox status code.
7721 * @param pIemCpu The IEM per CPU data.
7722 * @param pu16Value Where to store the popped value.
7723 * @param pTmpRsp Pointer to the temporary stack pointer.
7724 */
7725static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7726{
7727 /* Increment the stack pointer. */
7728 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7729 RTUINT64U NewRsp = *pTmpRsp;
7730 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7731
7732 /* Fetch the word the lazy way. */
7733 uint16_t const *pu16Src;
7734 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7735 if (rc == VINF_SUCCESS)
7736 {
7737 *pu16Value = *pu16Src;
7738 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7739
7740 /* Commit the new RSP value. */
7741 if (rc == VINF_SUCCESS)
7742 *pTmpRsp = NewRsp;
7743 }
7744
7745 return rc;
7746}
7747
7748
7749/**
7750 * Pops a dword from the stack, using a temporary stack pointer.
7751 *
7752 * @returns Strict VBox status code.
7753 * @param pIemCpu The IEM per CPU data.
7754 * @param pu32Value Where to store the popped value.
7755 * @param pTmpRsp Pointer to the temporary stack pointer.
7756 */
7757static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7758{
7759 /* Increment the stack pointer. */
7760 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7761 RTUINT64U NewRsp = *pTmpRsp;
7762 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7763
7764 /* Fetch the dword the lazy way. */
7765 uint32_t const *pu32Src;
7766 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7767 if (rc == VINF_SUCCESS)
7768 {
7769 *pu32Value = *pu32Src;
7770 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7771
7772 /* Commit the new RSP value. */
7773 if (rc == VINF_SUCCESS)
7774 *pTmpRsp = NewRsp;
7775 }
7776
7777 return rc;
7778}
7779
7780
7781/**
7782 * Pops a qword from the stack, using a temporary stack pointer.
7783 *
7784 * @returns Strict VBox status code.
7785 * @param pIemCpu The IEM per CPU data.
7786 * @param pu64Value Where to store the popped value.
7787 * @param pTmpRsp Pointer to the temporary stack pointer.
7788 */
7789static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7790{
7791 /* Increment the stack pointer. */
7792 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7793 RTUINT64U NewRsp = *pTmpRsp;
7794 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7795
7796 /* Fetch the qword the lazy way. */
7797 uint64_t const *pu64Src;
7798 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7799 if (rcStrict == VINF_SUCCESS)
7800 {
7801 *pu64Value = *pu64Src;
7802 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7803
7804 /* Commit the new RSP value. */
7805 if (rcStrict == VINF_SUCCESS)
7806 *pTmpRsp = NewRsp;
7807 }
7808
7809 return rcStrict;
7810}
7811
7812
7813/**
7814 * Begin a special stack push (used by interrupts, exceptions and such).
7815 *
7816 * This will raise \#SS or \#PF if appropriate.
7817 *
7818 * @returns Strict VBox status code.
7819 * @param pIemCpu The IEM per CPU data.
7820 * @param cbMem The number of bytes to push onto the stack.
7821 * @param ppvMem Where to return the pointer to the stack memory.
7822 * As with the other memory functions this could be
7823 * direct access or bounce buffered access, so
7824 * don't commit the register until the commit call
7825 * succeeds.
7826 * @param puNewRsp Where to return the new RSP value. This must be
7827 * passed unchanged to
7828 * iemMemStackPushCommitSpecial().
7829 */
7830static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
7831{
7832 Assert(cbMem < UINT8_MAX);
7833 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7834 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7835 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7836}
7837
7838
7839/**
7840 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7841 *
7842 * This will update the rSP.
7843 *
7844 * @returns Strict VBox status code.
7845 * @param pIemCpu The IEM per CPU data.
7846 * @param pvMem The pointer returned by
7847 * iemMemStackPushBeginSpecial().
7848 * @param uNewRsp The new RSP value returned by
7849 * iemMemStackPushBeginSpecial().
7850 */
7851static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
7852{
7853 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
7854 if (rcStrict == VINF_SUCCESS)
7855 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7856 return rcStrict;
7857}
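/* Protocol sketch (illustrative): building a 32-bit exception/interrupt stack frame
   with the special push API.  The exact frame layout is up to the caller; the
   EFLAGS/CS/EIP ordering below is only an assumed example.
       uint64_t     uNewRsp;
       uint32_t    *pu32Frame;
       VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
                                                           (void **)&pu32Frame, &uNewRsp);
       if (rcStrict == VINF_SUCCESS)
       {
           pu32Frame[2] = uEfl;
           pu32Frame[1] = uSelCs;
           pu32Frame[0] = uEip;
           rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu32Frame, uNewRsp);
       }
   RSP is only updated by the commit call, so a fault while filling in the frame
   leaves the guest stack pointer untouched. */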
7858
7859
7860/**
7861 * Begin a special stack pop (used by iret, retf and such).
7862 *
7863 * This will raise \#SS or \#PF if appropriate.
7864 *
7865 * @returns Strict VBox status code.
7866 * @param pIemCpu The IEM per CPU data.
7867 * @param cbMem The number of bytes to pop from the stack.
7868 * @param ppvMem Where to return the pointer to the stack memory.
7869 * @param puNewRsp Where to return the new RSP value. This must be
7870 * passed unchanged to
7871 * iemMemStackPopCommitSpecial() or applied
7872 * manually if iemMemStackPopDoneSpecial() is used.
7873 */
7874static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7875{
7876 Assert(cbMem < UINT8_MAX);
7877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7878 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7879 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7880}
7881
7882
7883/**
7884 * Continue a special stack pop (used by iret and retf).
7885 *
7886 * This will raise \#SS or \#PF if appropriate.
7887 *
7888 * @returns Strict VBox status code.
7889 * @param pIemCpu The IEM per CPU data.
7890 * @param cbMem The number of bytes to pop from the stack.
7891 * @param ppvMem Where to return the pointer to the stack memory.
7892 * @param puNewRsp Where to return the new RSP value. This must be
7893 * passed unchanged to
7894 * iemMemStackPopCommitSpecial() or applied
7895 * manually if iemMemStackPopDoneSpecial() is used.
7896 */
7897static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7898{
7899 Assert(cbMem < UINT8_MAX);
7900 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7901 RTUINT64U NewRsp;
7902 NewRsp.u = *puNewRsp;
7903 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7904 *puNewRsp = NewRsp.u;
7905 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7906}
7907
7908
7909/**
7910 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
7911 *
7912 * This will update the rSP.
7913 *
7914 * @returns Strict VBox status code.
7915 * @param pIemCpu The IEM per CPU data.
7916 * @param pvMem The pointer returned by
7917 * iemMemStackPopBeginSpecial().
7918 * @param uNewRsp The new RSP value returned by
7919 * iemMemStackPopBeginSpecial().
7920 */
7921static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
7922{
7923 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7924 if (rcStrict == VINF_SUCCESS)
7925 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7926 return rcStrict;
7927}
7928
7929
7930/**
7931 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7932 * iemMemStackPopContinueSpecial).
7933 *
7934 * The caller will manually commit the rSP.
7935 *
7936 * @returns Strict VBox status code.
7937 * @param pIemCpu The IEM per CPU data.
7938 * @param pvMem The pointer returned by
7939 * iemMemStackPopBeginSpecial() or
7940 * iemMemStackPopContinueSpecial().
7941 */
7942static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
7943{
7944 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7945}
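/* Protocol sketch (illustrative): reading a 32-bit iret-style frame with the special
   pop API.  The field order is an assumed example; the real callers live in the
   CImpl code.
       uint64_t        uNewRsp;
       uint32_t const *pu32Frame;
       VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
                                                             (void const **)&pu32Frame, &uNewRsp);
       if (rcStrict == VINF_SUCCESS)
       {
           uint32_t const uEip   = pu32Frame[0];
           uint32_t const uSelCs = pu32Frame[1];
           uint32_t const uEfl   = pu32Frame[2];
           rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu32Frame, uNewRsp);
       }
   Alternatively iemMemStackPopDoneSpecial can be used and the new RSP committed by
   hand once all checks have passed. */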
7946
7947
7948/**
7949 * Fetches a system table byte.
7950 *
7951 * @returns Strict VBox status code.
7952 * @param pIemCpu The IEM per CPU data.
7953 * @param pbDst Where to return the byte.
7954 * @param iSegReg The index of the segment register to use for
7955 * this access. The base and limits are checked.
7956 * @param GCPtrMem The address of the guest memory.
7957 */
7958static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7959{
7960 /* The lazy approach for now... */
7961 uint8_t const *pbSrc;
7962 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
7963 if (rc == VINF_SUCCESS)
7964 {
7965 *pbDst = *pbSrc;
7966 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
7967 }
7968 return rc;
7969}
7970
7971
7972/**
7973 * Fetches a system table word.
7974 *
7975 * @returns Strict VBox status code.
7976 * @param pIemCpu The IEM per CPU data.
7977 * @param pu16Dst Where to return the word.
7978 * @param iSegReg The index of the segment register to use for
7979 * this access. The base and limits are checked.
7980 * @param GCPtrMem The address of the guest memory.
7981 */
7982static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7983{
7984 /* The lazy approach for now... */
7985 uint16_t const *pu16Src;
7986 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
7987 if (rc == VINF_SUCCESS)
7988 {
7989 *pu16Dst = *pu16Src;
7990 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
7991 }
7992 return rc;
7993}
7994
7995
7996/**
7997 * Fetches a system table dword.
7998 *
7999 * @returns Strict VBox status code.
8000 * @param pIemCpu The IEM per CPU data.
8001 * @param pu32Dst Where to return the dword.
8002 * @param iSegReg The index of the segment register to use for
8003 * this access. The base and limits are checked.
8004 * @param GCPtrMem The address of the guest memory.
8005 */
8006static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8007{
8008 /* The lazy approach for now... */
8009 uint32_t const *pu32Src;
8010 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8011 if (rc == VINF_SUCCESS)
8012 {
8013 *pu32Dst = *pu32Src;
8014 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8015 }
8016 return rc;
8017}
8018
8019
8020/**
8021 * Fetches a system table qword.
8022 *
8023 * @returns Strict VBox status code.
8024 * @param pIemCpu The IEM per CPU data.
8025 * @param pu64Dst Where to return the qword.
8026 * @param iSegReg The index of the segment register to use for
8027 * this access. The base and limits are checked.
8028 * @param GCPtrMem The address of the guest memory.
8029 */
8030static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8031{
8032 /* The lazy approach for now... */
8033 uint64_t const *pu64Src;
8034 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8035 if (rc == VINF_SUCCESS)
8036 {
8037 *pu64Dst = *pu64Src;
8038 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8039 }
8040 return rc;
8041}
8042
8043
8044/**
8045 * Fetches a descriptor table entry with caller specified error code.
8046 *
8047 * @returns Strict VBox status code.
8048 * @param pIemCpu The IEM per CPU.
8049 * @param pDesc Where to return the descriptor table entry.
8050 * @param uSel The selector which table entry to fetch.
8051 * @param uXcpt The exception to raise on table lookup error.
8052 * @param uErrorCode The error code associated with the exception.
8053 */
8054static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt,
8055 uint16_t uErrorCode)
8056{
8057 AssertPtr(pDesc);
8058 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8059
8060 /** @todo did the 286 require all 8 bytes to be accessible? */
8061 /*
8062 * Get the selector table base and check bounds.
8063 */
8064 RTGCPTR GCPtrBase;
8065 if (uSel & X86_SEL_LDT)
8066 {
8067 if ( !pCtx->ldtr.Attr.n.u1Present
8068 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8069 {
8070 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8071 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8072 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8073 uErrorCode, 0);
8074 }
8075
8076 Assert(pCtx->ldtr.Attr.n.u1Present);
8077 GCPtrBase = pCtx->ldtr.u64Base;
8078 }
8079 else
8080 {
8081 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8082 {
8083 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8084 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8085 uErrorCode, 0);
8086 }
8087 GCPtrBase = pCtx->gdtr.pGdt;
8088 }
8089
8090 /*
8091 * Read the legacy descriptor and maybe the long mode extensions if
8092 * required.
8093 */
8094 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8095 if (rcStrict == VINF_SUCCESS)
8096 {
8097 if ( !IEM_IS_LONG_MODE(pIemCpu)
8098 || pDesc->Legacy.Gen.u1DescType)
8099 pDesc->Long.au64[1] = 0;
8100 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8101 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8102 else
8103 {
8104 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8105 /** @todo is this the right exception? */
8106 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8107 }
8108 }
8109 return rcStrict;
8110}
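/* Worked example (illustration only): for uSel=0x28 checked against the GDT,
   the bounds test above ORs in X86_SEL_RPL_LDT (7) and compares 0x2f against
   cbGdt, i.e. all eight descriptor bytes must lie within the limit.  A GDT
   limit of 0x2e would therefore fault here, while 0x2f (room for six
   descriptors) would not. */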
8111
8112
8113/**
8114 * Fetches a descriptor table entry.
8115 *
8116 * @returns Strict VBox status code.
8117 * @param pIemCpu The IEM per CPU.
8118 * @param pDesc Where to return the descriptor table entry.
8119 * @param uSel The selector which table entry to fetch.
8120 * @param uXcpt The exception to raise on table lookup error.
8121 */
8122static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8123{
8124 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8125}
8126
8127
8128/**
8129 * Fakes a long mode stack selector for SS = 0.
8130 *
8131 * @param pDescSs Where to return the fake stack descriptor.
8132 * @param uDpl The DPL we want.
8133 */
8134static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8135{
8136 pDescSs->Long.au64[0] = 0;
8137 pDescSs->Long.au64[1] = 0;
8138 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8139 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8140 pDescSs->Long.Gen.u2Dpl = uDpl;
8141 pDescSs->Long.Gen.u1Present = 1;
8142 pDescSs->Long.Gen.u1Long = 1;
8143}
8144
8145
8146/**
8147 * Marks the selector descriptor as accessed (only non-system descriptors).
8148 *
8149 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8150 * will therefore skip the limit checks.
8151 *
8152 * @returns Strict VBox status code.
8153 * @param pIemCpu The IEM per CPU.
8154 * @param uSel The selector.
8155 */
8156static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8157{
8158 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8159
8160 /*
8161 * Get the selector table base and calculate the entry address.
8162 */
8163 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8164 ? pCtx->ldtr.u64Base
8165 : pCtx->gdtr.pGdt;
8166 GCPtr += uSel & X86_SEL_MASK;
8167
8168 /*
8169 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8170 * ugly stuff to avoid this. This will make sure it's an atomic access
8171     * as well as more or less remove any question about 8-bit or 32-bit accesses.
8172 */
8173 VBOXSTRICTRC rcStrict;
8174 uint32_t volatile *pu32;
8175 if ((GCPtr & 3) == 0)
8176 {
8177        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8178 GCPtr += 2 + 2;
8179 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8180 if (rcStrict != VINF_SUCCESS)
8181 return rcStrict;
8182        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8183 }
8184 else
8185 {
8186 /* The misaligned GDT/LDT case, map the whole thing. */
8187 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8188 if (rcStrict != VINF_SUCCESS)
8189 return rcStrict;
8190 switch ((uintptr_t)pu32 & 3)
8191 {
8192 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8193 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8194 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8195 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8196 }
8197 }
8198
8199 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8200}
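/* Illustration only: the accessed flag is bit 40 of the 8-byte descriptor.
   In the aligned path above the dword at GCPtr+4 is mapped and bit 8 of it is
   set (40 - 32 = 8), i.e. the X86_SEL_TYPE_ACCESSED bit just above the
   u8BaseHigh1 byte; the misaligned path instead recomputes the byte and bit
   offsets from whatever alignment the mapping returned. */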
8201
8202/** @} */
8203
8204
8205/*
8206 * Include the C/C++ implementation of instruction.
8207 */
8208#include "IEMAllCImpl.cpp.h"
8209
8210
8211
8212/** @name "Microcode" macros.
8213 *
8214 * The idea is that we should be able to use the same code to interpret
8215 * instructions as well as recompiler instructions. Thus this obfuscation.
8216 *
8217 * @{
8218 */
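/*
 * Illustrative sketch only (not part of the build): a hypothetical opcode
 * routine showing how the macros below are strung together for a simple
 * register-to-register move.  The routine name is made up; the real users of
 * these macros live in the instruction implementation included further down.
 *
 *      FNIEMOP_DEF(iemOp_example_mov_ax_cx)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          IEM_MC_BEGIN(0, 1);
 *          IEM_MC_LOCAL(uint16_t, u16Tmp);
 *          IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xCX);
 *          IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
 *          IEM_MC_ADVANCE_RIP();
 *          IEM_MC_END();
 *          return VINF_SUCCESS;
 *      }
 */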
8219#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8220#define IEM_MC_END() }
8221#define IEM_MC_PAUSE() do {} while (0)
8222#define IEM_MC_CONTINUE() do {} while (0)
8223
8224/** Internal macro. */
8225#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8226 do \
8227 { \
8228 VBOXSTRICTRC rcStrict2 = a_Expr; \
8229 if (rcStrict2 != VINF_SUCCESS) \
8230 return rcStrict2; \
8231 } while (0)
8232
8233#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8234#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8235#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8236#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8237#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8238#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8239#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8240
8241#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8242#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8243 do { \
8244 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8245 return iemRaiseDeviceNotAvailable(pIemCpu); \
8246 } while (0)
8247#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8248 do { \
8249 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8250 return iemRaiseMathFault(pIemCpu); \
8251 } while (0)
8252#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8253 do { \
8254 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8255 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8256 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8257 return iemRaiseUndefinedOpcode(pIemCpu); \
8258 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8259 return iemRaiseDeviceNotAvailable(pIemCpu); \
8260 } while (0)
8261#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8262 do { \
8263 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8264 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8265 return iemRaiseUndefinedOpcode(pIemCpu); \
8266 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8267 return iemRaiseDeviceNotAvailable(pIemCpu); \
8268 } while (0)
8269#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8270 do { \
8271 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8272 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8273 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8274 return iemRaiseUndefinedOpcode(pIemCpu); \
8275 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8276 return iemRaiseDeviceNotAvailable(pIemCpu); \
8277 } while (0)
8278#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8279 do { \
8280 if (pIemCpu->uCpl != 0) \
8281 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8282 } while (0)
8283
8284
8285#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8286#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8287#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8288#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8289#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8290#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8291#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8292 uint32_t a_Name; \
8293 uint32_t *a_pName = &a_Name
8294#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8295 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8296
8297#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8298#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8299
8300#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8301#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8302#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8303#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8304#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8305#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8306#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8307#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8308#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8309#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8310#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8311#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8312#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8313#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8314#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8315#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8316#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8317#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8318#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8319#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8320#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8321#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8322#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8323#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8324#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8325#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8326#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8327#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8328#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8329/** @note Not for IOPL or IF testing or modification. */
8330#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8331#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8332#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8333#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8334
8335#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8336#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8337#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8338#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8339#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8340#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8341#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8342#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8343#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8344#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8345#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8346 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8347
8348#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8349#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8350/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8351 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8352#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8353#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8354/** @note Not for IOPL or IF testing or modification. */
8355#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
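/* Illustration only, expanding on the @todo above: a hypothetical 32-bit
 * read-modify-write sequence pairs the reference with an explicit clear of the
 * high half so 64-bit mode semantics are preserved:
 *
 *      IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
 *      IEM_MC_CALL_VOID_AIMPL_2(pfnWorker, pu32Dst, u32Src);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *
 * pfnWorker and u32Src stand in for whatever the instruction supplies.
 */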
8356
8357#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8358#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8359#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8360 do { \
8361 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8362 *pu32Reg += (a_u32Value); \
8363        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8364 } while (0)
8365#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8366
8367#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8368#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8369#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8370 do { \
8371 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8372 *pu32Reg -= (a_u32Value); \
8373        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8374 } while (0)
8375#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8376
8377#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8378#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8379#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8380#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8381#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8382#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8383#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8384
8385#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8386#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8387#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8388#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8389
8390#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8391#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8392#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8393
8394#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8395#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8396
8397#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8398#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8399#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8400
8401#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8402#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8403#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8404
8405#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8406
8407#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8408
8409#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8410#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8411#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8412 do { \
8413 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8414 *pu32Reg &= (a_u32Value); \
8415        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8416 } while (0)
8417#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8418
8419#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8420#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8421#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8422 do { \
8423 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8424 *pu32Reg |= (a_u32Value); \
8425        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
8426 } while (0)
8427#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8428
8429
8430/** @note Not for IOPL or IF modification. */
8431#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8432/** @note Not for IOPL or IF modification. */
8433#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8434/** @note Not for IOPL or IF modification. */
8435#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8436
8437#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8438
8439
8440#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8441 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8442#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8443 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8444#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8445 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8446#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8447 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8448#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8449 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8450#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8451 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8452#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8453 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8454
8455#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8456 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8457#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8458 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8459#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8460 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8461#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8462 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8463#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8464 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8465 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8466 } while (0)
8467#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8468 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8469 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8470 } while (0)
8471#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8472 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8473#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8474 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8475#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8476 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8477
8478#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8479 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8480#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8481 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8482#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8483 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8484
8485#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8486 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8487#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8488 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8489#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8490 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8491
8492#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8493 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8494#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8495 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8496#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8497 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8498
8499#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8500 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8501
8502#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8503 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8504#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8505 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8506#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8507 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8508#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8509 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8510
8511#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8512 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8513#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8514 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8515#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8516 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8517
8518#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8519 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8520#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8521 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8522
8523
8524
8525#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8526 do { \
8527 uint8_t u8Tmp; \
8528 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8529 (a_u16Dst) = u8Tmp; \
8530 } while (0)
8531#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8532 do { \
8533 uint8_t u8Tmp; \
8534 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8535 (a_u32Dst) = u8Tmp; \
8536 } while (0)
8537#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8538 do { \
8539 uint8_t u8Tmp; \
8540 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8541 (a_u64Dst) = u8Tmp; \
8542 } while (0)
8543#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8544 do { \
8545 uint16_t u16Tmp; \
8546 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8547 (a_u32Dst) = u16Tmp; \
8548 } while (0)
8549#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8550 do { \
8551 uint16_t u16Tmp; \
8552 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8553 (a_u64Dst) = u16Tmp; \
8554 } while (0)
8555#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8556 do { \
8557 uint32_t u32Tmp; \
8558 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8559 (a_u64Dst) = u32Tmp; \
8560 } while (0)
8561
8562#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8563 do { \
8564 uint8_t u8Tmp; \
8565 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8566 (a_u16Dst) = (int8_t)u8Tmp; \
8567 } while (0)
8568#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8569 do { \
8570 uint8_t u8Tmp; \
8571 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8572 (a_u32Dst) = (int8_t)u8Tmp; \
8573 } while (0)
8574#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8575 do { \
8576 uint8_t u8Tmp; \
8577 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8578 (a_u64Dst) = (int8_t)u8Tmp; \
8579 } while (0)
8580#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8581 do { \
8582 uint16_t u16Tmp; \
8583 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8584 (a_u32Dst) = (int16_t)u16Tmp; \
8585 } while (0)
8586#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8587 do { \
8588 uint16_t u16Tmp; \
8589 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8590 (a_u64Dst) = (int16_t)u16Tmp; \
8591 } while (0)
8592#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8593 do { \
8594 uint32_t u32Tmp; \
8595 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8596 (a_u64Dst) = (int32_t)u32Tmp; \
8597 } while (0)
8598
8599#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8600 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8601#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8602 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8603#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8604 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8605#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8606 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8607
8608#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8609 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8610#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8611 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8612#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8613 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8614#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8615 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8616
8617#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8618#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8619#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8620#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8621#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8622#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8623#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8624 do { \
8625 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8626 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8627 } while (0)
8628
8629#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8630 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8631#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8632 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8633
8634
8635#define IEM_MC_PUSH_U16(a_u16Value) \
8636 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8637#define IEM_MC_PUSH_U32(a_u32Value) \
8638 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8639#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8640 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8641#define IEM_MC_PUSH_U64(a_u64Value) \
8642 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8643
8644#define IEM_MC_POP_U16(a_pu16Value) \
8645 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8646#define IEM_MC_POP_U32(a_pu32Value) \
8647 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8648#define IEM_MC_POP_U64(a_pu64Value) \
8649 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8650
8651/** Maps guest memory for direct or bounce buffered access.
8652 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8653 * @remarks May return.
8654 */
8655#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8656 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8657
8658/** Maps guest memory for direct or bounce buffered access.
8659 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8660 * @remarks May return.
8661 */
8662#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8663 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8664
8665/** Commits the memory and unmaps the guest memory.
8666 * @remarks May return.
8667 */
8668#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8669 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
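/* Illustration only: a hypothetical memory-destination instruction maps the
 * operand, lets an assembly worker modify it in place, and then commits:
 *
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnWorker, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *
 * pfnWorker, u16Src, pEFlags and GCPtrEffDst stand in for locals a real
 * implementation declares via IEM_MC_LOCAL / IEM_MC_ARG / IEM_MC_CALC_RM_EFF_ADDR.
 */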
8670
8671/** Commits the memory and unmaps the guest memory unless the FPU status word
8672 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8673 * would prevent the store.
8674 *
8675 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8676 * store, while \#P will not.
8677 *
8678 * @remarks May in theory return - for now.
8679 */
8680#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8681 do { \
8682 if ( !(a_u16FSW & X86_FSW_ES) \
8683 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8684 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8685 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8686 } while (0)
8687
8688/** Calculate efficient address from R/M. */
8689#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8690 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8691
8692#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8693#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8694#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8695#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8696#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8697#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8698#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8699
8700/**
8701 * Defers the rest of the instruction emulation to a C implementation routine
8702 * and returns, only taking the standard parameters.
8703 *
8704 * @param a_pfnCImpl The pointer to the C routine.
8705 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8706 */
8707#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8708
8709/**
8710 * Defers the rest of instruction emulation to a C implementation routine and
8711 * returns, taking one argument in addition to the standard ones.
8712 *
8713 * @param a_pfnCImpl The pointer to the C routine.
8714 * @param a0 The argument.
8715 */
8716#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8717
8718/**
8719 * Defers the rest of the instruction emulation to a C implementation routine
8720 * and returns, taking two arguments in addition to the standard ones.
8721 *
8722 * @param a_pfnCImpl The pointer to the C routine.
8723 * @param a0 The first extra argument.
8724 * @param a1 The second extra argument.
8725 */
8726#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8727
8728/**
8729 * Defers the rest of the instruction emulation to a C implementation routine
8730 * and returns, taking three arguments in addition to the standard ones.
8731 *
8732 * @param a_pfnCImpl The pointer to the C routine.
8733 * @param a0 The first extra argument.
8734 * @param a1 The second extra argument.
8735 * @param a2 The third extra argument.
8736 */
8737#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8738
8739/**
8740 * Defers the rest of the instruction emulation to a C implementation routine
8741 * and returns, taking four arguments in addition to the standard ones.
8742 *
8743 * @param a_pfnCImpl The pointer to the C routine.
8744 * @param a0 The first extra argument.
8745 * @param a1 The second extra argument.
8746 * @param a2 The third extra argument.
8747 * @param a3 The fourth extra argument.
8748 */
8749#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8750
8751/**
8752 * Defers the rest of the instruction emulation to a C implementation routine
8753 * and returns, taking five arguments in addition to the standard ones.
8754 *
8755 * @param a_pfnCImpl The pointer to the C routine.
8756 * @param a0 The first extra argument.
8757 * @param a1 The second extra argument.
8758 * @param a2 The third extra argument.
8759 * @param a3 The fourth extra argument.
8760 * @param a4 The fifth extra argument.
8761 */
8762#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8763
8764/**
8765 * Defers the entire instruction emulation to a C implementation routine and
8766 * returns, only taking the standard parameters.
8767 *
8768 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8769 *
8770 * @param a_pfnCImpl The pointer to the C routine.
8771 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8772 */
8773#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8774
8775/**
8776 * Defers the entire instruction emulation to a C implementation routine and
8777 * returns, taking one argument in addition to the standard ones.
8778 *
8779 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8780 *
8781 * @param a_pfnCImpl The pointer to the C routine.
8782 * @param a0 The argument.
8783 */
8784#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8785
8786/**
8787 * Defers the entire instruction emulation to a C implementation routine and
8788 * returns, taking two arguments in addition to the standard ones.
8789 *
8790 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8791 *
8792 * @param a_pfnCImpl The pointer to the C routine.
8793 * @param a0 The first extra argument.
8794 * @param a1 The second extra argument.
8795 */
8796#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8797
8798/**
8799 * Defers the entire instruction emulation to a C implementation routine and
8800 * returns, taking three arguments in addition to the standard ones.
8801 *
8802 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8803 *
8804 * @param a_pfnCImpl The pointer to the C routine.
8805 * @param a0 The first extra argument.
8806 * @param a1 The second extra argument.
8807 * @param a2 The third extra argument.
8808 */
8809#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
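/* Illustration only: IEM_MC_CALL_CIMPL_* is used inside an IEM_MC_BEGIN/END
 * block once decoded operands have been gathered, whereas the
 * IEM_MC_DEFER_TO_CIMPL_* variants replace the whole block for instructions
 * handled entirely by a C worker, e.g. (hypothetical routine name):
 *
 *      FNIEMOP_DEF(iemOp_example_hlt)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *      }
 */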
8810
8811/**
8812 * Calls a FPU assembly implementation taking one visible argument.
8813 *
8814 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8815 * @param a0 The first extra argument.
8816 */
8817#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8818 do { \
8819 iemFpuPrepareUsage(pIemCpu); \
8820 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
8821 } while (0)
8822
8823/**
8824 * Calls a FPU assembly implementation taking two visible arguments.
8825 *
8826 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8827 * @param a0 The first extra argument.
8828 * @param a1 The second extra argument.
8829 */
8830#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
8831 do { \
8832 iemFpuPrepareUsage(pIemCpu); \
8833 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
8834 } while (0)
8835
8836/**
8837 * Calls a FPU assembly implementation taking three visible arguments.
8838 *
8839 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8840 * @param a0 The first extra argument.
8841 * @param a1 The second extra argument.
8842 * @param a2 The third extra argument.
8843 */
8844#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
8845 do { \
8846 iemFpuPrepareUsage(pIemCpu); \
8847 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
8848 } while (0)
8849
8850#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
8851 do { \
8852 (a_FpuData).FSW = (a_FSW); \
8853 (a_FpuData).r80Result = *(a_pr80Value); \
8854 } while (0)
8855
8856/** Pushes FPU result onto the stack. */
8857#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
8858 iemFpuPushResult(pIemCpu, &a_FpuData)
8859/** Pushes FPU result onto the stack and sets the FPUDP. */
8860#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
8861 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
8862
8863/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
8864#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
8865 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
8866
8867/** Stores FPU result in a stack register. */
8868#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
8869 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
8870/** Stores FPU result in a stack register and pops the stack. */
8871#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
8872 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
8873/** Stores FPU result in a stack register and sets the FPUDP. */
8874#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8875 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
8876/** Stores FPU result in a stack register, sets the FPUDP, and pops the
8877 * stack. */
8878#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8879 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
8880
8881/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
8882#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
8883 iemFpuUpdateOpcodeAndIp(pIemCpu)
8884/** Free a stack register (for FFREE and FFREEP). */
8885#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
8886 iemFpuStackFree(pIemCpu, a_iStReg)
8887/** Increment the FPU stack pointer. */
8888#define IEM_MC_FPU_STACK_INC_TOP() \
8889 iemFpuStackIncTop(pIemCpu)
8890/** Decrement the FPU stack pointer. */
8891#define IEM_MC_FPU_STACK_DEC_TOP() \
8892 iemFpuStackDecTop(pIemCpu)
8893
8894/** Updates the FSW, FOP, FPUIP, and FPUCS. */
8895#define IEM_MC_UPDATE_FSW(a_u16FSW) \
8896 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8897/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
8898#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
8899 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8900/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
8901#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8902 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8903/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
8904#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
8905 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
8906/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
8907 * stack. */
8908#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8909 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8910/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
8911#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
8912    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
8913
8914/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
8915#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
8916 iemFpuStackUnderflow(pIemCpu, a_iStDst)
8917/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8918 * stack. */
8919#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
8920 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
8921/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8922 * FPUDS. */
8923#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8924 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8925/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8926 * FPUDS. Pops stack. */
8927#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8928 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8929/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8930 * stack twice. */
8931#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
8932 iemFpuStackUnderflowThenPopPop(pIemCpu)
8933/** Raises a FPU stack underflow exception for an instruction pushing a result
8934 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
8935#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
8936 iemFpuStackPushUnderflow(pIemCpu)
8937/** Raises a FPU stack underflow exception for an instruction pushing a result
8938 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
8939#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
8940 iemFpuStackPushUnderflowTwo(pIemCpu)
8941
8942/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8943 * FPUIP, FPUCS and FOP. */
8944#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
8945 iemFpuStackPushOverflow(pIemCpu)
8946/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8947 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
8948#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
8949 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
8950/** Indicates that we (might) have modified the FPU state. */
8951#define IEM_MC_USED_FPU() \
8952 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
8953
8954/**
8955 * Calls a MMX assembly implementation taking two visible arguments.
8956 *
8957 * @param a_pfnAImpl Pointer to the assembly MMX routine.
8958 * @param a0 The first extra argument.
8959 * @param a1 The second extra argument.
8960 */
8961#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
8962 do { \
8963 iemFpuPrepareUsage(pIemCpu); \
8964 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
8965 } while (0)
8966
8967/**
8968 * Calls a MMX assembly implementation taking three visible arguments.
8969 *
8970 * @param a_pfnAImpl Pointer to the assembly MMX routine.
8971 * @param a0 The first extra argument.
8972 * @param a1 The second extra argument.
8973 * @param a2 The third extra argument.
8974 */
8975#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
8976 do { \
8977 iemFpuPrepareUsage(pIemCpu); \
8978 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
8979 } while (0)
8980
8981
8982/**
8983 * Calls a SSE assembly implementation taking two visible arguments.
8984 *
8985 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
8986 * @param a0 The first extra argument.
8987 * @param a1 The second extra argument.
8988 */
8989#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
8990 do { \
8991 iemFpuPrepareUsageSse(pIemCpu); \
8992 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
8993 } while (0)
8994
8995/**
8996 * Calls a SSE assembly implementation taking three visible arguments.
8997 *
8998 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
8999 * @param a0 The first extra argument.
9000 * @param a1 The second extra argument.
9001 * @param a2 The third extra argument.
9002 */
9003#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9004 do { \
9005 iemFpuPrepareUsageSse(pIemCpu); \
9006 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9007 } while (0)
9008
9009
9010/** @note Not for IOPL or IF testing. */
9011#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9012/** @note Not for IOPL or IF testing. */
9013#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9014/** @note Not for IOPL or IF testing. */
9015#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9016/** @note Not for IOPL or IF testing. */
9017#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9018/** @note Not for IOPL or IF testing. */
9019#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9020 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9021 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9022/** @note Not for IOPL or IF testing. */
9023#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9024 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9025 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9026/** @note Not for IOPL or IF testing. */
9027#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9028 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9029 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9030 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9031/** @note Not for IOPL or IF testing. */
9032#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9033 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9034 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9035 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9036#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9037#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9038#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9039/** @note Not for IOPL or IF testing. */
9040#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9041 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9042 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9043/** @note Not for IOPL or IF testing. */
9044#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9045 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9046 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9047/** @note Not for IOPL or IF testing. */
9048#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9049 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9050 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9051/** @note Not for IOPL or IF testing. */
9052#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9053 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9054 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9055/** @note Not for IOPL or IF testing. */
9056#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9057 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9058 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9059/** @note Not for IOPL or IF testing. */
9060#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9061 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9062 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9063#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9064#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9065#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9066 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9067#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9068 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9069#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9070 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9071#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9072 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9073#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9074 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9075#define IEM_MC_IF_FCW_IM() \
9076 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9077
9078#define IEM_MC_ELSE() } else {
9079#define IEM_MC_ENDIF() } do {} while (0)
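/* Illustration only: a hypothetical two-operand FPU instruction combines the
 * conditional macros above with the FPU result macros roughly like this:
 *
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_USED_FPU();
 *
 * pfnAImpl, FpuRes/pFpuRes and the value pointers stand in for locals and
 * arguments a real implementation declares with IEM_MC_LOCAL / IEM_MC_ARG.
 */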
9080
9081/** @} */
9082
9083
9084/** @name Opcode Debug Helpers.
9085 * @{
9086 */
9087#ifdef DEBUG
9088# define IEMOP_MNEMONIC(a_szMnemonic) \
9089 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9090 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9091# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9092 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9093 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9094#else
9095# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9096# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9097#endif
9098
9099/** @} */
9100
9101
9102/** @name Opcode Helpers.
9103 * @{
9104 */
9105
9106/** The instruction raises an \#UD in real and V8086 mode. */
9107#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9108 do \
9109 { \
9110 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9111 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9112 } while (0)
9113
9114/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9115 * lock prefixed.
9116 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9117#define IEMOP_HLP_NO_LOCK_PREFIX() \
9118 do \
9119 { \
9120 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9121 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9122 } while (0)
9123
9124/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9125 * 64-bit mode. */
9126#define IEMOP_HLP_NO_64BIT() \
9127 do \
9128 { \
9129 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9130 return IEMOP_RAISE_INVALID_OPCODE(); \
9131 } while (0)
9132
9133/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9134 * 64-bit mode. */
9135#define IEMOP_HLP_ONLY_64BIT() \
9136 do \
9137 { \
9138 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9139 return IEMOP_RAISE_INVALID_OPCODE(); \
9140 } while (0)
9141
9142/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9143#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9144 do \
9145 { \
9146 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9147 iemRecalEffOpSize64Default(pIemCpu); \
9148 } while (0)
9149
9150/** The instruction has 64-bit operand size if 64-bit mode. */
9151#define IEMOP_HLP_64BIT_OP_SIZE() \
9152 do \
9153 { \
9154 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9155 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9156 } while (0)
9157
9158/** Only a REX prefix immediately preceding the first opcode byte takes
9159 * effect. This macro helps ensure this as well as log bad guest code. */
9160#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9161 do \
9162 { \
9163 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9164 { \
9165 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9166 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9167 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9168 pIemCpu->uRexB = 0; \
9169 pIemCpu->uRexIndex = 0; \
9170 pIemCpu->uRexReg = 0; \
9171 iemRecalEffOpSize(pIemCpu); \
9172 } \
9173 } while (0)
9174
9175/**
9176 * Done decoding.
9177 */
9178#define IEMOP_HLP_DONE_DECODING() \
9179 do \
9180 { \
9181 /*nothing for now, maybe later... */ \
9182 } while (0)
9183
9184/**
9185 * Done decoding, raise \#UD exception if lock prefix present.
9186 */
9187#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9188 do \
9189 { \
9190 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9191 { /* likely */ } \
9192 else \
9193 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9194 } while (0)
9195#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9196 do \
9197 { \
9198 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9199 { /* likely */ } \
9200 else \
9201 { \
9202 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9203 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9204 } \
9205 } while (0)
9206#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9207 do \
9208 { \
9209 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9210 { /* likely */ } \
9211 else \
9212 { \
9213 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9214 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9215 } \
9216 } while (0)
9217/**
9218 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9219 * are present.
9220 */
9221#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9222 do \
9223 { \
9224        if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9225 { /* likely */ } \
9226 else \
9227 return IEMOP_RAISE_INVALID_OPCODE(); \
9228 } while (0)
9229
9230
9231/**
9232 * Calculates the effective address of a ModR/M memory operand.
9233 *
9234 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9235 *
9236 * @return Strict VBox status code.
9237 * @param pIemCpu The IEM per CPU data.
9238 * @param bRm The ModRM byte.
9239 * @param cbImm The size of any immediate following the
9240 * effective address opcode bytes. Important for
9241 * RIP relative addressing.
9242 * @param pGCPtrEff Where to return the effective address.
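 *
 * @remarks Worked example: the 32-bit encoding 8B 44 B3 10
 *          (mov eax, [ebx+esi*4+10h]) has ModRM=44h (mod=1, reg=0, rm=4, so a
 *          SIB byte and a disp8 follow) and SIB=B3h (scale=4, index=ESI,
 *          base=EBX), for which this function returns EBX + ESI*4 + 10h.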
9243 */
9244static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9245{
9246 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9247 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9248#define SET_SS_DEF() \
9249 do \
9250 { \
9251 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9252 pIemCpu->iEffSeg = X86_SREG_SS; \
9253 } while (0)
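    /* Addressing forms using xBP or xSP as the base register default to the SS segment
       rather than DS (e.g. [bp+si], [ebp+8], [rsp+rax]); SET_SS_DEF applies that default
       unless a segment override prefix has already been decoded. */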
9254
9255 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9256 {
9257/** @todo Check the effective address size crap! */
9258 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9259 {
9260 uint16_t u16EffAddr;
9261
9262 /* Handle the disp16 form with no registers first. */
9263 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9264 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9265 else
9266 {
9267                /* Get the displacement. */
9268 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9269 {
9270 case 0: u16EffAddr = 0; break;
9271 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9272 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9273 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9274 }
9275
9276 /* Add the base and index registers to the disp. */
9277 switch (bRm & X86_MODRM_RM_MASK)
9278 {
9279 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9280 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9281 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9282 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9283 case 4: u16EffAddr += pCtx->si; break;
9284 case 5: u16EffAddr += pCtx->di; break;
9285 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9286 case 7: u16EffAddr += pCtx->bx; break;
9287 }
9288 }
9289
9290 *pGCPtrEff = u16EffAddr;
9291 }
9292 else
9293 {
9294 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9295 uint32_t u32EffAddr;
9296
9297 /* Handle the disp32 form with no registers first. */
9298 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9299 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9300 else
9301 {
9302 /* Get the register (or SIB) value. */
9303 switch ((bRm & X86_MODRM_RM_MASK))
9304 {
9305 case 0: u32EffAddr = pCtx->eax; break;
9306 case 1: u32EffAddr = pCtx->ecx; break;
9307 case 2: u32EffAddr = pCtx->edx; break;
9308 case 3: u32EffAddr = pCtx->ebx; break;
9309 case 4: /* SIB */
9310 {
9311 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9312
9313 /* Get the index and scale it. */
9314 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9315 {
9316 case 0: u32EffAddr = pCtx->eax; break;
9317 case 1: u32EffAddr = pCtx->ecx; break;
9318 case 2: u32EffAddr = pCtx->edx; break;
9319 case 3: u32EffAddr = pCtx->ebx; break;
9320 case 4: u32EffAddr = 0; /*none */ break;
9321 case 5: u32EffAddr = pCtx->ebp; break;
9322 case 6: u32EffAddr = pCtx->esi; break;
9323 case 7: u32EffAddr = pCtx->edi; break;
9324 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9325 }
9326 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9327
9328 /* add base */
9329 switch (bSib & X86_SIB_BASE_MASK)
9330 {
9331 case 0: u32EffAddr += pCtx->eax; break;
9332 case 1: u32EffAddr += pCtx->ecx; break;
9333 case 2: u32EffAddr += pCtx->edx; break;
9334 case 3: u32EffAddr += pCtx->ebx; break;
9335 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9336 case 5:
9337 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9338 {
9339 u32EffAddr += pCtx->ebp;
9340 SET_SS_DEF();
9341 }
9342 else
9343 {
9344 uint32_t u32Disp;
9345 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9346 u32EffAddr += u32Disp;
9347 }
9348 break;
9349 case 6: u32EffAddr += pCtx->esi; break;
9350 case 7: u32EffAddr += pCtx->edi; break;
9351 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9352 }
9353 break;
9354 }
9355 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9356 case 6: u32EffAddr = pCtx->esi; break;
9357 case 7: u32EffAddr = pCtx->edi; break;
9358 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9359 }
9360
9361 /* Get and add the displacement. */
9362 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9363 {
9364 case 0:
9365 break;
9366 case 1:
9367 {
9368 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9369 u32EffAddr += i8Disp;
9370 break;
9371 }
9372 case 2:
9373 {
9374 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9375 u32EffAddr += u32Disp;
9376 break;
9377 }
9378 default:
9379 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9380 }
9381
9382 }
9383 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9384 *pGCPtrEff = u32EffAddr;
9385 else
9386 {
9387 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9388 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9389 }
9390 }
9391 }
9392 else
9393 {
9394 uint64_t u64EffAddr;
9395
9396 /* Handle the rip+disp32 form with no registers first. */
9397 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9398 {
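            /* RIP-relative addressing: the displacement is relative to the address of
               the next instruction, i.e. the current RIP plus the opcode bytes decoded
               so far plus any immediate that follows the effective address (cbImm). */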
9399 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9400 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9401 }
9402 else
9403 {
9404 /* Get the register (or SIB) value. */
9405 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9406 {
9407 case 0: u64EffAddr = pCtx->rax; break;
9408 case 1: u64EffAddr = pCtx->rcx; break;
9409 case 2: u64EffAddr = pCtx->rdx; break;
9410 case 3: u64EffAddr = pCtx->rbx; break;
9411 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9412 case 6: u64EffAddr = pCtx->rsi; break;
9413 case 7: u64EffAddr = pCtx->rdi; break;
9414 case 8: u64EffAddr = pCtx->r8; break;
9415 case 9: u64EffAddr = pCtx->r9; break;
9416 case 10: u64EffAddr = pCtx->r10; break;
9417 case 11: u64EffAddr = pCtx->r11; break;
9418 case 13: u64EffAddr = pCtx->r13; break;
9419 case 14: u64EffAddr = pCtx->r14; break;
9420 case 15: u64EffAddr = pCtx->r15; break;
9421 /* SIB */
9422 case 4:
9423 case 12:
9424 {
9425 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9426
9427 /* Get the index and scale it. */
9428 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9429 {
9430 case 0: u64EffAddr = pCtx->rax; break;
9431 case 1: u64EffAddr = pCtx->rcx; break;
9432 case 2: u64EffAddr = pCtx->rdx; break;
9433 case 3: u64EffAddr = pCtx->rbx; break;
9434 case 4: u64EffAddr = 0; /*none */ break;
9435 case 5: u64EffAddr = pCtx->rbp; break;
9436 case 6: u64EffAddr = pCtx->rsi; break;
9437 case 7: u64EffAddr = pCtx->rdi; break;
9438 case 8: u64EffAddr = pCtx->r8; break;
9439 case 9: u64EffAddr = pCtx->r9; break;
9440 case 10: u64EffAddr = pCtx->r10; break;
9441 case 11: u64EffAddr = pCtx->r11; break;
9442 case 12: u64EffAddr = pCtx->r12; break;
9443 case 13: u64EffAddr = pCtx->r13; break;
9444 case 14: u64EffAddr = pCtx->r14; break;
9445 case 15: u64EffAddr = pCtx->r15; break;
9446 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9447 }
9448 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9449
9450 /* add base */
9451 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9452 {
9453 case 0: u64EffAddr += pCtx->rax; break;
9454 case 1: u64EffAddr += pCtx->rcx; break;
9455 case 2: u64EffAddr += pCtx->rdx; break;
9456 case 3: u64EffAddr += pCtx->rbx; break;
9457 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9458 case 6: u64EffAddr += pCtx->rsi; break;
9459 case 7: u64EffAddr += pCtx->rdi; break;
9460 case 8: u64EffAddr += pCtx->r8; break;
9461 case 9: u64EffAddr += pCtx->r9; break;
9462 case 10: u64EffAddr += pCtx->r10; break;
9463 case 11: u64EffAddr += pCtx->r11; break;
9464 case 12: u64EffAddr += pCtx->r12; break;
9465 case 14: u64EffAddr += pCtx->r14; break;
9466 case 15: u64EffAddr += pCtx->r15; break;
9467 /* complicated encodings */
9468 case 5:
9469 case 13:
9470 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9471 {
9472 if (!pIemCpu->uRexB)
9473 {
9474 u64EffAddr += pCtx->rbp;
9475 SET_SS_DEF();
9476 }
9477 else
9478 u64EffAddr += pCtx->r13;
9479 }
9480 else
9481 {
9482 uint32_t u32Disp;
9483 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9484 u64EffAddr += (int32_t)u32Disp;
9485 }
9486 break;
9487 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9488 }
9489 break;
9490 }
9491 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9492 }
9493
9494 /* Get and add the displacement. */
9495 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9496 {
9497 case 0:
9498 break;
9499 case 1:
9500 {
9501 int8_t i8Disp;
9502 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9503 u64EffAddr += i8Disp;
9504 break;
9505 }
9506 case 2:
9507 {
9508 uint32_t u32Disp;
9509 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9510 u64EffAddr += (int32_t)u32Disp;
9511 break;
9512 }
9513 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9514 }
9515
9516 }
9517
9518 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9519 *pGCPtrEff = u64EffAddr;
9520 else
9521 {
9522 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9523 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9524 }
9525 }
9526
9527 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9528 return VINF_SUCCESS;
9529}
9530
9531/** @} */
9532
9533
9534
9535/*
9536 * Include the instructions
9537 */
9538#include "IEMAllInstructions.cpp.h"
9539
9540
9541
9542
9543#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9544
9545/**
9546 * Sets up execution verification mode.
9547 */
9548static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9549{
9550 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9551 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9552
9553 /*
9554 * Always note down the address of the current instruction.
9555 */
9556 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9557 pIemCpu->uOldRip = pOrgCtx->rip;
9558
9559 /*
9560 * Enable verification and/or logging.
9561 */
9562    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9563 if ( fNewNoRem
9564 && ( 0
9565#if 0 /* auto enable on first paged protected mode interrupt */
9566 || ( pOrgCtx->eflags.Bits.u1IF
9567 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9568 && TRPMHasTrap(pVCpu)
9569 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9570#endif
9571#if 0
9572           || (   pOrgCtx->cs.Sel == 0x10
9573               && (   pOrgCtx->rip == 0x90119e3e
9574                   || pOrgCtx->rip == 0x901d9810))
9575#endif
9576#if 0 /* Auto enable DSL - FPU stuff. */
9577           || (   pOrgCtx->cs.Sel == 0x10
9578 && (// pOrgCtx->rip == 0xc02ec07f
9579 //|| pOrgCtx->rip == 0xc02ec082
9580 //|| pOrgCtx->rip == 0xc02ec0c9
9581 0
9582 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9583#endif
9584#if 0 /* Auto enable DSL - fstp st0 stuff. */
9585           || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9586#endif
9587#if 0
9588 || pOrgCtx->rip == 0x9022bb3a
9589#endif
9590#if 0
9591 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9592#endif
9593#if 0
9594 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9595 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9596#endif
9597#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9598 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9599 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9600 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9601#endif
9602#if 0 /* NT4SP1 - xadd early boot. */
9603 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9604#endif
9605#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9606 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9607#endif
9608#if 0 /* NT4SP1 - cmpxchg (AMD). */
9609 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9610#endif
9611#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9612 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9613#endif
9614#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9615 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9616
9617#endif
9618#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9619 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9620
9621#endif
9622#if 0 /* NT4SP1 - frstor [ecx] */
9623 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9624#endif
9625#if 0 /* xxxxxx - All long mode code. */
9626 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9627#endif
9628#if 0 /* rep movsq linux 3.7 64-bit boot. */
9629 || (pOrgCtx->rip == 0x0000000000100241)
9630#endif
9631#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9632 || (pOrgCtx->rip == 0x000000000215e240)
9633#endif
9634#if 0 /* DOS's size-overridden iret to v8086. */
9635 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9636#endif
9637 )
9638 )
9639 {
9640 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9641 RTLogFlags(NULL, "enabled");
9642 fNewNoRem = false;
9643 }
9644 if (fNewNoRem != pIemCpu->fNoRem)
9645 {
9646 pIemCpu->fNoRem = fNewNoRem;
9647 if (!fNewNoRem)
9648 {
9649 LogAlways(("Enabling verification mode!\n"));
9650 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9651 }
9652 else
9653 LogAlways(("Disabling verification mode!\n"));
9654 }
9655
9656 /*
9657 * Switch state.
9658 */
9659 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9660 {
9661 static CPUMCTX s_DebugCtx; /* Ugly! */
9662
9663 s_DebugCtx = *pOrgCtx;
9664 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9665 }
9666
9667 /*
9668 * See if there is an interrupt pending in TRPM and inject it if we can.
9669 */
9670 pIemCpu->uInjectCpl = UINT8_MAX;
9671 if ( pOrgCtx->eflags.Bits.u1IF
9672 && TRPMHasTrap(pVCpu)
9673 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9674 {
9675 uint8_t u8TrapNo;
9676 TRPMEVENT enmType;
9677 RTGCUINT uErrCode;
9678 RTGCPTR uCr2;
9679 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9680 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9681 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9682 TRPMResetTrap(pVCpu);
9683 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9684 }
9685
9686 /*
9687 * Reset the counters.
9688 */
9689 pIemCpu->cIOReads = 0;
9690 pIemCpu->cIOWrites = 0;
9691 pIemCpu->fIgnoreRaxRdx = false;
9692 pIemCpu->fOverlappingMovs = false;
9693 pIemCpu->fProblematicMemory = false;
9694 pIemCpu->fUndefinedEFlags = 0;
9695
9696 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9697 {
9698 /*
9699 * Free all verification records.
9700 */
9701 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9702 pIemCpu->pIemEvtRecHead = NULL;
9703 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9704 do
9705 {
9706 while (pEvtRec)
9707 {
9708 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9709 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9710 pIemCpu->pFreeEvtRec = pEvtRec;
9711 pEvtRec = pNext;
9712 }
9713 pEvtRec = pIemCpu->pOtherEvtRecHead;
9714 pIemCpu->pOtherEvtRecHead = NULL;
9715 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9716 } while (pEvtRec);
9717 }
9718}
9719
9720
9721/**
9722 * Allocate an event record.
9723 * @returns Pointer to a record.
9724 */
9725static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9726{
9727 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9728 return NULL;
9729
9730 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9731 if (pEvtRec)
9732 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9733 else
9734 {
9735 if (!pIemCpu->ppIemEvtRecNext)
9736 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9737
9738 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9739 if (!pEvtRec)
9740 return NULL;
9741 }
9742 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9743 pEvtRec->pNext = NULL;
9744 return pEvtRec;
9745}
9746
9747
9748/**
9749 * IOMMMIORead notification.
9750 */
9751VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9752{
9753 PVMCPU pVCpu = VMMGetCpu(pVM);
9754 if (!pVCpu)
9755 return;
9756 PIEMCPU pIemCpu = &pVCpu->iem.s;
9757 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9758 if (!pEvtRec)
9759 return;
9760 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9761 pEvtRec->u.RamRead.GCPhys = GCPhys;
9762 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9763 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9764 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9765}
9766
9767
9768/**
9769 * IOMMMIOWrite notification.
9770 */
9771VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9772{
9773 PVMCPU pVCpu = VMMGetCpu(pVM);
9774 if (!pVCpu)
9775 return;
9776 PIEMCPU pIemCpu = &pVCpu->iem.s;
9777 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9778 if (!pEvtRec)
9779 return;
9780 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9781 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9782 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9783 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9784 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9785 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9786 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9787 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9788 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9789}
9790
9791
9792/**
9793 * IOMIOPortRead notification.
9794 */
9795VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9796{
9797 PVMCPU pVCpu = VMMGetCpu(pVM);
9798 if (!pVCpu)
9799 return;
9800 PIEMCPU pIemCpu = &pVCpu->iem.s;
9801 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9802 if (!pEvtRec)
9803 return;
9804 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9805 pEvtRec->u.IOPortRead.Port = Port;
9806 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9807 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9808 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9809}
9810
9811/**
9812 * IOMIOPortWrite notification.
9813 */
9814VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9815{
9816 PVMCPU pVCpu = VMMGetCpu(pVM);
9817 if (!pVCpu)
9818 return;
9819 PIEMCPU pIemCpu = &pVCpu->iem.s;
9820 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9821 if (!pEvtRec)
9822 return;
9823 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9824 pEvtRec->u.IOPortWrite.Port = Port;
9825 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9826 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9827 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9828 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9829}
9830
9831
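/**
 * IOMIOPortReadString notification (not implemented, asserts if called).
 */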
9832VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
9833{
9834 AssertFailed();
9835}
9836
9837
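/**
 * IOMIOPortWriteString notification (not implemented, asserts if called).
 */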
9838VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
9839{
9840 AssertFailed();
9841}
9842
9843
9844/**
9845 * Fakes and records an I/O port read.
9846 *
9847 * @returns VINF_SUCCESS.
9848 * @param pIemCpu The IEM per CPU data.
9849 * @param Port The I/O port.
9850 * @param pu32Value Where to store the fake value.
9851 * @param cbValue The size of the access.
9852 */
9853static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
9854{
9855 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9856 if (pEvtRec)
9857 {
9858 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9859 pEvtRec->u.IOPortRead.Port = Port;
9860 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9861 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9862 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9863 }
9864 pIemCpu->cIOReads++;
9865 *pu32Value = 0xcccccccc;
9866 return VINF_SUCCESS;
9867}
9868
9869
9870/**
9871 * Fakes and records an I/O port write.
9872 *
9873 * @returns VINF_SUCCESS.
9874 * @param pIemCpu The IEM per CPU data.
9875 * @param Port The I/O port.
9876 * @param u32Value The value being written.
9877 * @param cbValue The size of the access.
9878 */
9879static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9880{
9881 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9882 if (pEvtRec)
9883 {
9884 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9885 pEvtRec->u.IOPortWrite.Port = Port;
9886 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9887 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9888 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9889 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9890 }
9891 pIemCpu->cIOWrites++;
9892 return VINF_SUCCESS;
9893}
9894
9895
9896/**
9897 * Used to add extra details about a stub case.
9898 * @param pIemCpu The IEM per CPU state.
9899 */
9900static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
9901{
9902 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9903 PVM pVM = IEMCPU_TO_VM(pIemCpu);
9904 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9905 char szRegs[4096];
9906 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
9907 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
9908 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
9909 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
9910 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
9911 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
9912 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
9913 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
9914 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
9915 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
9916 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
9917 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
9918 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
9919 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
9920 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
9921 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
9922 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
9923 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
9924 " efer=%016VR{efer}\n"
9925 " pat=%016VR{pat}\n"
9926 " sf_mask=%016VR{sf_mask}\n"
9927 "krnl_gs_base=%016VR{krnl_gs_base}\n"
9928 " lstar=%016VR{lstar}\n"
9929 " star=%016VR{star} cstar=%016VR{cstar}\n"
9930 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
9931 );
9932
9933 char szInstr1[256];
9934 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
9935 DBGF_DISAS_FLAGS_DEFAULT_MODE,
9936 szInstr1, sizeof(szInstr1), NULL);
9937 char szInstr2[256];
9938 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
9939 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9940 szInstr2, sizeof(szInstr2), NULL);
9941
9942 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
9943}
9944
9945
9946/**
9947 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
9948 * dump to the assertion info.
9949 *
9950 * @param pEvtRec The record to dump.
9951 */
9952static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
9953{
9954 switch (pEvtRec->enmEvent)
9955 {
9956 case IEMVERIFYEVENT_IOPORT_READ:
9957 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
9958 pEvtRec->u.IOPortWrite.Port,
9959 pEvtRec->u.IOPortWrite.cbValue);
9960 break;
9961 case IEMVERIFYEVENT_IOPORT_WRITE:
9962 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
9963 pEvtRec->u.IOPortWrite.Port,
9964 pEvtRec->u.IOPortWrite.cbValue,
9965 pEvtRec->u.IOPortWrite.u32Value);
9966 break;
9967 case IEMVERIFYEVENT_RAM_READ:
9968 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
9969 pEvtRec->u.RamRead.GCPhys,
9970 pEvtRec->u.RamRead.cb);
9971 break;
9972 case IEMVERIFYEVENT_RAM_WRITE:
9973 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
9974 pEvtRec->u.RamWrite.GCPhys,
9975 pEvtRec->u.RamWrite.cb,
9976 (int)pEvtRec->u.RamWrite.cb,
9977 pEvtRec->u.RamWrite.ab);
9978 break;
9979 default:
9980 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
9981 break;
9982 }
9983}
9984
9985
9986/**
9987 * Raises an assertion on the specified records, showing the given message with
9988 * a record dump attached.
9989 *
9990 * @param pIemCpu The IEM per CPU data.
9991 * @param pEvtRec1 The first record.
9992 * @param pEvtRec2 The second record.
9993 * @param pszMsg The message explaining why we're asserting.
9994 */
9995static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
9996{
9997 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
9998 iemVerifyAssertAddRecordDump(pEvtRec1);
9999 iemVerifyAssertAddRecordDump(pEvtRec2);
10000 iemVerifyAssertMsg2(pIemCpu);
10001 RTAssertPanic();
10002}
10003
10004
10005/**
10006 * Raises an assertion on the specified record, showing the given message with
10007 * a record dump attached.
10008 *
10009 * @param pIemCpu The IEM per CPU data.
10010 * @param pEvtRec The record.
10011 * @param pszMsg The message explaining why we're asserting.
10012 */
10013static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10014{
10015 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10016 iemVerifyAssertAddRecordDump(pEvtRec);
10017 iemVerifyAssertMsg2(pIemCpu);
10018 RTAssertPanic();
10019}
10020
10021
10022/**
10023 * Verifies a write record.
10024 *
10025 * @param pIemCpu The IEM per CPU data.
10026 * @param pEvtRec The write record.
10027 * @param fRem Set if REM was doing the other execution. If clear
10028 * it was HM.
10029 */
10030static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10031{
10032 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10033 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10034 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10035 if ( RT_FAILURE(rc)
10036 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10037 {
10038 /* fend off ins */
10039 if ( !pIemCpu->cIOReads
10040 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10041 || ( pEvtRec->u.RamWrite.cb != 1
10042 && pEvtRec->u.RamWrite.cb != 2
10043 && pEvtRec->u.RamWrite.cb != 4) )
10044 {
10045 /* fend off ROMs and MMIO */
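            /* (The checks below cover 000a0000h-000fffffh, the legacy VGA/ROM area,
               and the top 256 KB of the 32-bit address space where the firmware ROM
               image is typically mapped.) */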
10046 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10047 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10048 {
10049 /* fend off fxsave */
10050 if (pEvtRec->u.RamWrite.cb != 512)
10051 {
10052 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10053 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10054                    RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
10055 RTAssertMsg2Add("%s: %.*Rhxs\n"
10056 "iem: %.*Rhxs\n",
10057 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10058 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10059 iemVerifyAssertAddRecordDump(pEvtRec);
10060 iemVerifyAssertMsg2(pIemCpu);
10061 RTAssertPanic();
10062 }
10063 }
10064 }
10065 }
10066
10067}
10068
10069/**
10070 * Performs the post-execution verification checks.
10071 */
10072static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10073{
10074 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10075 return;
10076
10077 /*
10078 * Switch back the state.
10079 */
10080 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10081 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10082 Assert(pOrgCtx != pDebugCtx);
10083 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10084
10085 /*
10086 * Execute the instruction in REM.
10087 */
10088 bool fRem = false;
10089 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10090 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10091 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10092#ifdef IEM_VERIFICATION_MODE_FULL_HM
10093 if ( HMIsEnabled(pVM)
10094 && pIemCpu->cIOReads == 0
10095 && pIemCpu->cIOWrites == 0
10096 && !pIemCpu->fProblematicMemory)
10097 {
10098 uint64_t uStartRip = pOrgCtx->rip;
10099 unsigned iLoops = 0;
10100 do
10101 {
10102 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10103 iLoops++;
10104 } while ( rc == VINF_SUCCESS
10105 || ( rc == VINF_EM_DBG_STEPPED
10106 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10107 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10108 || ( pOrgCtx->rip != pDebugCtx->rip
10109 && pIemCpu->uInjectCpl != UINT8_MAX
10110 && iLoops < 8) );
10111 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10112 rc = VINF_SUCCESS;
10113 }
10114#endif
10115 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10116 || rc == VINF_IOM_R3_IOPORT_READ
10117 || rc == VINF_IOM_R3_IOPORT_WRITE
10118 || rc == VINF_IOM_R3_MMIO_READ
10119 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10120 || rc == VINF_IOM_R3_MMIO_WRITE
10121 || rc == VINF_CPUM_R3_MSR_READ
10122 || rc == VINF_CPUM_R3_MSR_WRITE
10123 || rc == VINF_EM_RESCHEDULE
10124 )
10125 {
10126 EMRemLock(pVM);
10127 rc = REMR3EmulateInstruction(pVM, pVCpu);
10128 AssertRC(rc);
10129 EMRemUnlock(pVM);
10130 fRem = true;
10131 }
10132
10133 /*
10134 * Compare the register states.
10135 */
10136 unsigned cDiffs = 0;
10137 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10138 {
10139 //Log(("REM and IEM ends up with different registers!\n"));
10140 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10141
10142# define CHECK_FIELD(a_Field) \
10143 do \
10144 { \
10145 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10146 { \
10147 switch (sizeof(pOrgCtx->a_Field)) \
10148 { \
10149 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10150 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10151 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10152 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10153 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10154 } \
10155 cDiffs++; \
10156 } \
10157 } while (0)
10158# define CHECK_XSTATE_FIELD(a_Field) \
10159 do \
10160 { \
10161 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10162 { \
10163 switch (sizeof(pOrgCtx->a_Field)) \
10164 { \
10165 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10166 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10167 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10168 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10169 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10170 } \
10171 cDiffs++; \
10172 } \
10173 } while (0)
10174
10175# define CHECK_BIT_FIELD(a_Field) \
10176 do \
10177 { \
10178 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10179 { \
10180 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10181 cDiffs++; \
10182 } \
10183 } while (0)
10184
10185# define CHECK_SEL(a_Sel) \
10186 do \
10187 { \
10188 CHECK_FIELD(a_Sel.Sel); \
10189 CHECK_FIELD(a_Sel.Attr.u); \
10190 CHECK_FIELD(a_Sel.u64Base); \
10191 CHECK_FIELD(a_Sel.u32Limit); \
10192 CHECK_FIELD(a_Sel.fFlags); \
10193 } while (0)
10194
10195 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10196 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10197
10198#if 1 /* The recompiler doesn't update these the intel way. */
10199 if (fRem)
10200 {
10201 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10202 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10203 pOrgXState->x87.CS = pDebugXState->x87.CS;
10204 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10205 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10206 pOrgXState->x87.DS = pDebugXState->x87.DS;
10207 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10208 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10209 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10210 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10211 }
10212#endif
10213 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10214 {
10215 RTAssertMsg2Weak(" the FPU state differs\n");
10216 cDiffs++;
10217 CHECK_XSTATE_FIELD(x87.FCW);
10218 CHECK_XSTATE_FIELD(x87.FSW);
10219 CHECK_XSTATE_FIELD(x87.FTW);
10220 CHECK_XSTATE_FIELD(x87.FOP);
10221 CHECK_XSTATE_FIELD(x87.FPUIP);
10222 CHECK_XSTATE_FIELD(x87.CS);
10223 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10224 CHECK_XSTATE_FIELD(x87.FPUDP);
10225 CHECK_XSTATE_FIELD(x87.DS);
10226 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10227 CHECK_XSTATE_FIELD(x87.MXCSR);
10228 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10229 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10230 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10231 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10232 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10233 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10234 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10235 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10236 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10237 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10238 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10239 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10240 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10241 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10242 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10243 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10244 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10245 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10246 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10247 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10248 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10249 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10250 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10251 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10252 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10253 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10254 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10255 }
10256 CHECK_FIELD(rip);
10257 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10258 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10259 {
10260 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10261 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10262 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10263 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10264 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10265 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10266 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10267 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10268 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10269 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10270 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10271 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10272 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10273 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10274 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10275 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10276            if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
10277 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10278 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10279 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10280 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10281 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10282 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10283 }
10284
10285 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10286 CHECK_FIELD(rax);
10287 CHECK_FIELD(rcx);
10288 if (!pIemCpu->fIgnoreRaxRdx)
10289 CHECK_FIELD(rdx);
10290 CHECK_FIELD(rbx);
10291 CHECK_FIELD(rsp);
10292 CHECK_FIELD(rbp);
10293 CHECK_FIELD(rsi);
10294 CHECK_FIELD(rdi);
10295 CHECK_FIELD(r8);
10296 CHECK_FIELD(r9);
10297 CHECK_FIELD(r10);
10298 CHECK_FIELD(r11);
10299 CHECK_FIELD(r12);
10300 CHECK_FIELD(r13);
10301 CHECK_SEL(cs);
10302 CHECK_SEL(ss);
10303 CHECK_SEL(ds);
10304 CHECK_SEL(es);
10305 CHECK_SEL(fs);
10306 CHECK_SEL(gs);
10307 CHECK_FIELD(cr0);
10308
10309        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10310           the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10311        /* Kludge #2: CR2 differs slightly on cross-page boundary faults; we report the last address of the access
10312           while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10313 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10314 {
10315 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10316 { /* ignore */ }
10317 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10318 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10319 && fRem)
10320 { /* ignore */ }
10321 else
10322 CHECK_FIELD(cr2);
10323 }
10324 CHECK_FIELD(cr3);
10325 CHECK_FIELD(cr4);
10326 CHECK_FIELD(dr[0]);
10327 CHECK_FIELD(dr[1]);
10328 CHECK_FIELD(dr[2]);
10329 CHECK_FIELD(dr[3]);
10330 CHECK_FIELD(dr[6]);
10331 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10332 CHECK_FIELD(dr[7]);
10333 CHECK_FIELD(gdtr.cbGdt);
10334 CHECK_FIELD(gdtr.pGdt);
10335 CHECK_FIELD(idtr.cbIdt);
10336 CHECK_FIELD(idtr.pIdt);
10337 CHECK_SEL(ldtr);
10338 CHECK_SEL(tr);
10339 CHECK_FIELD(SysEnter.cs);
10340 CHECK_FIELD(SysEnter.eip);
10341 CHECK_FIELD(SysEnter.esp);
10342 CHECK_FIELD(msrEFER);
10343 CHECK_FIELD(msrSTAR);
10344 CHECK_FIELD(msrPAT);
10345 CHECK_FIELD(msrLSTAR);
10346 CHECK_FIELD(msrCSTAR);
10347 CHECK_FIELD(msrSFMASK);
10348 CHECK_FIELD(msrKERNELGSBASE);
10349
10350 if (cDiffs != 0)
10351 {
10352 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10353 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10354 iemVerifyAssertMsg2(pIemCpu);
10355 RTAssertPanic();
10356 }
10357# undef CHECK_FIELD
10358# undef CHECK_BIT_FIELD
10359 }
10360
10361 /*
10362 * If the register state compared fine, check the verification event
10363 * records.
10364 */
10365 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10366 {
10367 /*
10368         * Compare verification event records.
10369 * - I/O port accesses should be a 1:1 match.
10370 */
10371 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10372 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10373 while (pIemRec && pOtherRec)
10374 {
10375            /* Since we might miss RAM writes and reads, ignore reads and verify
10376               that any extra write records match what is actually in memory. */
10377 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10378 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10379 && pIemRec->pNext)
10380 {
10381 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10382 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10383 pIemRec = pIemRec->pNext;
10384 }
10385
10386 /* Do the compare. */
10387 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10388 {
10389 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10390 break;
10391 }
10392 bool fEquals;
10393 switch (pIemRec->enmEvent)
10394 {
10395 case IEMVERIFYEVENT_IOPORT_READ:
10396 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10397 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10398 break;
10399 case IEMVERIFYEVENT_IOPORT_WRITE:
10400 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10401 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10402 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10403 break;
10404 case IEMVERIFYEVENT_RAM_READ:
10405 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10406 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10407 break;
10408 case IEMVERIFYEVENT_RAM_WRITE:
10409 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10410 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10411 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10412 break;
10413 default:
10414 fEquals = false;
10415 break;
10416 }
10417 if (!fEquals)
10418 {
10419 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10420 break;
10421 }
10422
10423 /* advance */
10424 pIemRec = pIemRec->pNext;
10425 pOtherRec = pOtherRec->pNext;
10426 }
10427
10428 /* Ignore extra writes and reads. */
10429 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10430 {
10431 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10432 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10433 pIemRec = pIemRec->pNext;
10434 }
10435 if (pIemRec != NULL)
10436 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10437 else if (pOtherRec != NULL)
10438 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10439 }
10440 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10441}
10442
10443#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10444
10445/* stubs */
10446static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10447{
10448 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10449 return VERR_INTERNAL_ERROR;
10450}
10451
10452static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10453{
10454 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10455 return VERR_INTERNAL_ERROR;
10456}
10457
10458#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10459
10460
10461#ifdef LOG_ENABLED
10462/**
10463 * Logs the current instruction.
10464 * @param pVCpu The cross context virtual CPU structure of the caller.
10465 * @param pCtx The current CPU context.
10466 * @param fSameCtx Set if we have the same context information as the VMM,
10467 * clear if we may have already executed an instruction in
10468 * our debug context. When clear, we assume IEMCPU holds
10469 * valid CPU mode info.
10470 */
10471static void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10472{
10473# ifdef IN_RING3
10474 if (LogIs2Enabled())
10475 {
10476 char szInstr[256];
10477 uint32_t cbInstr = 0;
10478 if (fSameCtx)
10479 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10480 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10481 szInstr, sizeof(szInstr), &cbInstr);
10482 else
10483 {
10484 uint32_t fFlags = 0;
10485 switch (pVCpu->iem.s.enmCpuMode)
10486 {
10487 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10488 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10489 case IEMMODE_16BIT:
10490 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10491 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10492 else
10493 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10494 break;
10495 }
10496 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10497 szInstr, sizeof(szInstr), &cbInstr);
10498 }
10499
10500 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10501 Log2(("****\n"
10502 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10503 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10504 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10505 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10506 " %s\n"
10507 ,
10508 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10509 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10510 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10511 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10512 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10513 szInstr));
10514
10515 if (LogIs3Enabled())
10516 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10517 }
10518 else
10519# endif
10520 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10521 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10522}
10523#endif
10524
10525
10526/**
10527 * Makes status code adjustments (passing up status codes from I/O and access
10528 * handlers) as well as maintaining statistics.
10529 *
10530 * @returns Strict VBox status code to pass up.
10531 * @param pIemCpu The IEM per CPU data.
10532 * @param rcStrict The status from executing an instruction.
10533 */
10534DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10535{
10536 if (rcStrict != VINF_SUCCESS)
10537 {
10538 if (RT_SUCCESS(rcStrict))
10539 {
10540 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10541 || rcStrict == VINF_IOM_R3_IOPORT_READ
10542 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10543 || rcStrict == VINF_IOM_R3_MMIO_READ
10544 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10545 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10546 || rcStrict == VINF_CPUM_R3_MSR_READ
10547 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10548 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10549 int32_t const rcPassUp = pIemCpu->rcPassUp;
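            /* A pass-up status recorded during execution takes precedence when it is not
               an EM status code, or when it is a stricter (numerically lower) EM status
               than the one the instruction returned. */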
10550 if (rcPassUp == VINF_SUCCESS)
10551 pIemCpu->cRetInfStatuses++;
10552 else if ( rcPassUp < VINF_EM_FIRST
10553 || rcPassUp > VINF_EM_LAST
10554 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10555 {
10556 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10557 pIemCpu->cRetPassUpStatus++;
10558 rcStrict = rcPassUp;
10559 }
10560 else
10561 {
10562 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10563 pIemCpu->cRetInfStatuses++;
10564 }
10565 }
10566 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10567 pIemCpu->cRetAspectNotImplemented++;
10568 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10569 pIemCpu->cRetInstrNotImplemented++;
10570#ifdef IEM_VERIFICATION_MODE_FULL
10571 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10572 rcStrict = VINF_SUCCESS;
10573#endif
10574 else
10575 pIemCpu->cRetErrStatuses++;
10576 }
10577 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10578 {
10579 pIemCpu->cRetPassUpStatus++;
10580 rcStrict = pIemCpu->rcPassUp;
10581 }
10582
10583 return rcStrict;
10584}
10585
10586
10587/**
10588 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10589 * IEMExecOneWithPrefetchedByPC.
10590 *
10591 * @return Strict VBox status code.
10592 * @param pVCpu The current virtual CPU.
10593 * @param pIemCpu The IEM per CPU data.
10594 * @param fExecuteInhibit If set, execute the instruction following CLI,
10595 * POP SS and MOV SS,GR.
10596 */
10597DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10598{
10599 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10600 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10601 if (rcStrict == VINF_SUCCESS)
10602 pIemCpu->cInstructions++;
10603 if (pIemCpu->cActiveMappings > 0)
10604 iemMemRollback(pIemCpu);
10605//#ifdef DEBUG
10606// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10607//#endif
10608
10609 /* Execute the next instruction as well if a cli, pop ss or
10610 mov ss, Gr has just completed successfully. */
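    /* (This way the instruction sitting in the interrupt-inhibit shadow gets executed
       before the caller has a chance to inject a pending interrupt.) */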
10611 if ( fExecuteInhibit
10612 && rcStrict == VINF_SUCCESS
10613 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10614 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10615 {
10616 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10617 if (rcStrict == VINF_SUCCESS)
10618 {
10619# ifdef LOG_ENABLED
10620 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10621# endif
10622 IEM_OPCODE_GET_NEXT_U8(&b);
10623 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10624 if (rcStrict == VINF_SUCCESS)
10625 pIemCpu->cInstructions++;
10626 if (pIemCpu->cActiveMappings > 0)
10627 iemMemRollback(pIemCpu);
10628 }
10629 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10630 }
10631
10632 /*
10633 * Return value fiddling, statistics and sanity assertions.
10634 */
10635 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10636
10637 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10638 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10639#if defined(IEM_VERIFICATION_MODE_FULL)
10640 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10641 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10642 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10643 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10644#endif
10645 return rcStrict;
10646}
10647
10648
10649#ifdef IN_RC
10650/**
10651 * Re-enters raw-mode or ensure we return to ring-3.
10652 *
10653 * @returns rcStrict, maybe modified.
10654 * @param pIemCpu The IEM CPU structure.
10655 * @param pVCpu The cross context virtual CPU structure of the caller.
10656 * @param pCtx The current CPU context.
10657 * @param rcStrict The status code returned by the interpreter.
10658 */
10659DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10660{
10661 if (!pIemCpu->fInPatchCode)
10662 CPUMRawEnter(pVCpu);
10663 return rcStrict;
10664}
10665#endif
10666
10667
10668/**
10669 * Execute one instruction.
10670 *
10671 * @return Strict VBox status code.
10672 * @param pVCpu The current virtual CPU.
10673 */
10674VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10675{
10676 PIEMCPU pIemCpu = &pVCpu->iem.s;
10677
10678#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10679 iemExecVerificationModeSetup(pIemCpu);
10680#endif
10681#ifdef LOG_ENABLED
10682 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10683 iemLogCurInstr(pVCpu, pCtx, true);
10684#endif
10685
10686 /*
10687 * Do the decoding and emulation.
10688 */
10689 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10690 if (rcStrict == VINF_SUCCESS)
10691 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10692
10693#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10694 /*
10695 * Assert some sanity.
10696 */
10697 iemExecVerificationModeCheck(pIemCpu);
10698#endif
10699#ifdef IN_RC
10700 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10701#endif
10702 if (rcStrict != VINF_SUCCESS)
10703 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10704 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10705 return rcStrict;
10706}
10707
10708
10709VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10710{
10711 PIEMCPU pIemCpu = &pVCpu->iem.s;
10712 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10713 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10714
10715 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10716 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10717 if (rcStrict == VINF_SUCCESS)
10718 {
10719 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10720 if (pcbWritten)
10721 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10722 }
10723
10724#ifdef IN_RC
10725 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10726#endif
10727 return rcStrict;
10728}
10729
10730
10731VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10732 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10733{
10734 PIEMCPU pIemCpu = &pVCpu->iem.s;
10735 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10736 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10737
10738 VBOXSTRICTRC rcStrict;
10739 if ( cbOpcodeBytes
10740 && pCtx->rip == OpcodeBytesPC)
10741 {
10742 iemInitDecoder(pIemCpu, false);
10743 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10744 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10745 rcStrict = VINF_SUCCESS;
10746 }
10747 else
10748 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10749 if (rcStrict == VINF_SUCCESS)
10750 {
10751 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10752 }
10753
10754#ifdef IN_RC
10755 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10756#endif
10757 return rcStrict;
10758}
10759
10760
10761VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10762{
10763 PIEMCPU pIemCpu = &pVCpu->iem.s;
10764 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10765 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10766
10767 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10768 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10769 if (rcStrict == VINF_SUCCESS)
10770 {
10771 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10772 if (pcbWritten)
10773 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10774 }
10775
10776#ifdef IN_RC
10777 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10778#endif
10779 return rcStrict;
10780}
10781
10782
10783VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10784 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10785{
10786 PIEMCPU pIemCpu = &pVCpu->iem.s;
10787 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10788 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10789
10790 VBOXSTRICTRC rcStrict;
10791 if ( cbOpcodeBytes
10792 && pCtx->rip == OpcodeBytesPC)
10793 {
10794 iemInitDecoder(pIemCpu, true);
10795 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10796 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10797 rcStrict = VINF_SUCCESS;
10798 }
10799 else
10800 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10801 if (rcStrict == VINF_SUCCESS)
10802 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10803
10804#ifdef IN_RC
10805 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10806#endif
10807 return rcStrict;
10808}
10809
10810
10811VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
10812{
10813 PIEMCPU pIemCpu = &pVCpu->iem.s;
10814
10815 /*
10816 * See if there is an interrupt pending in TRPM and inject it if we can.
10817 */
10818#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
10819 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10820# ifdef IEM_VERIFICATION_MODE_FULL
10821 pIemCpu->uInjectCpl = UINT8_MAX;
10822# endif
10823 if ( pCtx->eflags.Bits.u1IF
10824 && TRPMHasTrap(pVCpu)
10825 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
10826 {
10827 uint8_t u8TrapNo;
10828 TRPMEVENT enmType;
10829 RTGCUINT uErrCode;
10830 RTGCPTR uCr2;
10831 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
10832 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10833 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10834 TRPMResetTrap(pVCpu);
10835 }
10836#else
10837 iemExecVerificationModeSetup(pIemCpu);
10838 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10839#endif
10840
10841 /*
10842 * Log the state.
10843 */
10844#ifdef LOG_ENABLED
10845 iemLogCurInstr(pVCpu, pCtx, true);
10846#endif
10847
10848 /*
10849 * Do the decoding and emulation.
10850 */
10851 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10852 if (rcStrict == VINF_SUCCESS)
10853 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10854
10855#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10856 /*
10857 * Assert some sanity.
10858 */
10859 iemExecVerificationModeCheck(pIemCpu);
10860#endif
10861
10862 /*
10863 * Maybe re-enter raw-mode and log.
10864 */
10865#ifdef IN_RC
10866 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10867#endif
10868 if (rcStrict != VINF_SUCCESS)
10869 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10870 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10871 return rcStrict;
10872}
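/*
 * Usage sketch (hypothetical caller): an execution loop could keep calling
 * IEMExecLots until it returns something other than VINF_SUCCESS, e.g.:
 *
 *      VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *      for (uint32_t cInstr = 0; cInstr < 1024 && rcStrict == VINF_SUCCESS; cInstr++)
 *          rcStrict = IEMExecLots(pVCpu);
 *
 * The 1024 instruction cap here is an arbitrary example value, not a limit
 * imposed by this API.
 */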
10873
10874
10875
10876/**
10877 * Injects a trap, fault, abort, software interrupt or external interrupt.
10878 *
10879 * The parameter list matches TRPMQueryTrapAll pretty closely.
10880 *
10881 * @returns Strict VBox status code.
10882 * @param pVCpu The current virtual CPU.
10883 * @param u8TrapNo The trap number.
10884 * @param enmType The event type: trap/fault/abort (CPU exception),
10885 * software interrupt or hardware/external interrupt.
10886 * @param uErrCode The error code if applicable.
10887 * @param uCr2 The CR2 value if applicable.
10888 * @param cbInstr The instruction length (only relevant for
10889 * software interrupts).
10890 */
10891VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10892 uint8_t cbInstr)
10893{
10894 iemInitDecoder(&pVCpu->iem.s, false);
10895#ifdef DBGFTRACE_ENABLED
10896 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10897 u8TrapNo, enmType, uErrCode, uCr2);
10898#endif
10899
10900 uint32_t fFlags;
10901 switch (enmType)
10902 {
10903 case TRPM_HARDWARE_INT:
10904 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10905 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10906 uErrCode = uCr2 = 0;
10907 break;
10908
10909 case TRPM_SOFTWARE_INT:
10910 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10911 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10912 uErrCode = uCr2 = 0;
10913 break;
10914
10915 case TRPM_TRAP:
10916 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10917 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10918 if (u8TrapNo == X86_XCPT_PF)
10919 fFlags |= IEM_XCPT_FLAGS_CR2;
10920 switch (u8TrapNo)
10921 {
10922 case X86_XCPT_DF:
10923 case X86_XCPT_TS:
10924 case X86_XCPT_NP:
10925 case X86_XCPT_SS:
10926 case X86_XCPT_PF:
10927 case X86_XCPT_AC:
10928 fFlags |= IEM_XCPT_FLAGS_ERR;
10929 break;
10930
10931 case X86_XCPT_NMI:
10932 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
10933 break;
10934 }
10935 break;
10936
10937 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10938 }
10939
10940 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10941}
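/*
 * Usage sketches (hypothetical callers; the variable names are illustrative only):
 *
 *      // Reflect a page fault with error code and fault address into the guest:
 *      rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0);
 *
 *      // Inject an external (hardware) interrupt vector; the error code and CR2
 *      // arguments are ignored for this event type:
 *      rcStrict = IEMInjectTrap(pVCpu, u8Vector, TRPM_HARDWARE_INT, 0, 0, 0);
 */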
10942
10943
10944/**
10945 * Injects the active TRPM event.
10946 *
10947 * @returns Strict VBox status code.
10948 * @param pVCpu Pointer to the VMCPU.
10949 */
10950VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
10951{
10952#ifndef IEM_IMPLEMENTS_TASKSWITCH
10953 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10954#else
10955 uint8_t u8TrapNo;
10956 TRPMEVENT enmType;
10957 RTGCUINT uErrCode;
10958 RTGCUINTPTR uCr2;
10959 uint8_t cbInstr;
10960 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
10961 if (RT_FAILURE(rc))
10962 return rc;
10963
10964 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10965
10966 /** @todo Are there any other codes that imply the event was successfully
10967 * delivered to the guest? See @bugref{6607}. */
10968 if ( rcStrict == VINF_SUCCESS
10969 || rcStrict == VINF_IEM_RAISED_XCPT)
10970 {
10971 TRPMResetTrap(pVCpu);
10972 }
10973 return rcStrict;
10974#endif
10975}
10976
10977
10978VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10979{
10980 return VERR_NOT_IMPLEMENTED;
10981}
10982
10983
10984VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10985{
10986 return VERR_NOT_IMPLEMENTED;
10987}
10988
10989
10990#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10991/**
10992 * Executes an IRET instruction with the default operand size.
10993 *
10994 * This is for PATM.
10995 *
10996 * @returns VBox status code.
10997 * @param pVCpu The current virtual CPU.
10998 * @param pCtxCore The register frame.
10999 */
11000VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11001{
11002 PIEMCPU pIemCpu = &pVCpu->iem.s;
11003 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11004
11005 iemCtxCoreToCtx(pCtx, pCtxCore);
11006 iemInitDecoder(pIemCpu);
11007 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11008 if (rcStrict == VINF_SUCCESS)
11009 iemCtxToCtxCore(pCtxCore, pCtx);
11010 else
11011 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11012 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11013 return rcStrict;
11014}
11015#endif
11016
11017
11018
11019/**
11020 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11021 *
11022 * This API ASSUMES that the caller has already verified that the guest code is
11023 * allowed to access the I/O port. (The I/O port is in the DX register in the
11024 * guest state.)
11025 *
11026 * @returns Strict VBox status code.
11027 * @param pVCpu The cross context per virtual CPU structure.
11028 * @param cbValue The size of the I/O port access (1, 2, or 4).
11029 * @param enmAddrMode The addressing mode.
11030 * @param fRepPrefix Indicates whether a repeat prefix is used
11031 * (doesn't matter which for this instruction).
11032 * @param cbInstr The instruction length in bytes.
11033 * @param iEffSeg The effective segment register number (the segment the data is read from).
11034 */
11035VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11036 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11037{
11038 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11039 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
11040
11041 /*
11042 * State init.
11043 */
11044 PIEMCPU pIemCpu = &pVCpu->iem.s;
11045 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11046
11047 /*
11048 * Switch orgy for getting to the right handler.
11049 */
11050 VBOXSTRICTRC rcStrict;
11051 if (fRepPrefix)
11052 {
11053 switch (enmAddrMode)
11054 {
11055 case IEMMODE_16BIT:
11056 switch (cbValue)
11057 {
11058 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11059 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11060 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11061 default:
11062 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11063 }
11064 break;
11065
11066 case IEMMODE_32BIT:
11067 switch (cbValue)
11068 {
11069 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11070 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11071 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11072 default:
11073 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11074 }
11075 break;
11076
11077 case IEMMODE_64BIT:
11078 switch (cbValue)
11079 {
11080 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11081 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11082 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11083 default:
11084 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11085 }
11086 break;
11087
11088 default:
11089 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11090 }
11091 }
11092 else
11093 {
11094 switch (enmAddrMode)
11095 {
11096 case IEMMODE_16BIT:
11097 switch (cbValue)
11098 {
11099 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11100 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11101 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11102 default:
11103 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11104 }
11105 break;
11106
11107 case IEMMODE_32BIT:
11108 switch (cbValue)
11109 {
11110 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11111 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11112 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11113 default:
11114 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11115 }
11116 break;
11117
11118 case IEMMODE_64BIT:
11119 switch (cbValue)
11120 {
11121 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11122 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11123 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11124 default:
11125 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11126 }
11127 break;
11128
11129 default:
11130 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11131 }
11132 }
11133
11134 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11135}
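/*
 * Usage sketch (hypothetical exit handler): after verifying that the guest may
 * access the I/O port in DX, a REP OUTSB with 32-bit addressing reading from
 * DS could be handed to IEM like this, with cbInstr taken from the VM exit:
 *
 *      rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true, cbInstr, X86_SREG_DS);
 */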
11136
11137
11138/**
11139 * Interface for HM and EM for executing string I/O IN (read) instructions.
11140 *
11141 * This API ASSUMES that the caller has already verified that the guest code is
11142 * allowed to access the I/O port. (The I/O port is in the DX register in the
11143 * guest state.)
11144 *
11145 * @returns Strict VBox status code.
11146 * @param pVCpu The cross context per virtual CPU structure.
11147 * @param cbValue The size of the I/O port access (1, 2, or 4).
11148 * @param enmAddrMode The addressing mode.
11149 * @param fRepPrefix Indicates whether a repeat prefix is used
11150 * (doesn't matter which for this instruction).
11151 * @param cbInstr The instruction length in bytes.
11152 */
11153VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11154 bool fRepPrefix, uint8_t cbInstr)
11155{
11156 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
11157
11158 /*
11159 * State init.
11160 */
11161 PIEMCPU pIemCpu = &pVCpu->iem.s;
11162 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11163
11164 /*
11165 * Switch orgy for getting to the right handler.
11166 */
11167 VBOXSTRICTRC rcStrict;
11168 if (fRepPrefix)
11169 {
11170 switch (enmAddrMode)
11171 {
11172 case IEMMODE_16BIT:
11173 switch (cbValue)
11174 {
11175 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11176 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11177 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11178 default:
11179 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11180 }
11181 break;
11182
11183 case IEMMODE_32BIT:
11184 switch (cbValue)
11185 {
11186 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11187 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11188 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11189 default:
11190 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11191 }
11192 break;
11193
11194 case IEMMODE_64BIT:
11195 switch (cbValue)
11196 {
11197 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11198 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11199 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11200 default:
11201 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11202 }
11203 break;
11204
11205 default:
11206 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11207 }
11208 }
11209 else
11210 {
11211 switch (enmAddrMode)
11212 {
11213 case IEMMODE_16BIT:
11214 switch (cbValue)
11215 {
11216 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11217 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11218 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11219 default:
11220 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11221 }
11222 break;
11223
11224 case IEMMODE_32BIT:
11225 switch (cbValue)
11226 {
11227 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11228 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11229 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11230 default:
11231 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11232 }
11233 break;
11234
11235 case IEMMODE_64BIT:
11236 switch (cbValue)
11237 {
11238 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11239 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11240 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11241 default:
11242 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11243 }
11244 break;
11245
11246 default:
11247 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11248 }
11249 }
11250
11251 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11252}
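/*
 * Usage sketch (hypothetical exit handler): a plain INSW (no repeat prefix)
 * with 16-bit addressing could be emulated as shown below; cbInstr again
 * comes from the VM exit information:
 *
 *      rcStrict = IEMExecStringIoRead(pVCpu, 2, IEMMODE_16BIT, false, cbInstr);
 */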
11253
11254
11255
11256/**
11257 * Interface for HM and EM to write to a CRx register.
11258 *
11259 * @returns Strict VBox status code.
11260 * @param pVCpu The cross context per virtual CPU structure.
11261 * @param cbInstr The instruction length in bytes.
11262 * @param iCrReg The control register number (destination).
11263 * @param iGReg The general purpose register number (source).
11264 *
11265 * @remarks In ring-0 not all of the state needs to be synced in.
11266 */
11267VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11268{
11269 AssertReturn(cbInstr - 2U <= 15U - 2U, VERR_IEM_INVALID_INSTR_LENGTH);
11270 Assert(iCrReg < 16);
11271 Assert(iGReg < 16);
11272
11273 PIEMCPU pIemCpu = &pVCpu->iem.s;
11274 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11275 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11276 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11277}
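/*
 * Usage sketch (hypothetical exit handler): emulating "mov cr3, rax" on a
 * CR3-write exit, where the register index would normally be decoded from the
 * exit qualification; three bytes is the unprefixed length of MOV CRx,Rd:
 *
 *      rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 3, X86_GREG_xAX);
 */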
11278
11279
11280/**
11281 * Interface for HM and EM to read from a CRx register.
11282 *
11283 * @returns Strict VBox status code.
11284 * @param pVCpu The cross context per virtual CPU structure.
11285 * @param cbInstr The instruction length in bytes.
11286 * @param iGReg The general purpose register number (destination).
11287 * @param iCrReg The control register number (source).
11288 *
11289 * @remarks In ring-0 not all of the state needs to be synced in.
11290 */
11291VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11292{
11293 AssertReturn(cbInstr - 2U <= 15U - 2U, VERR_IEM_INVALID_INSTR_LENGTH);
11294 Assert(iCrReg < 16);
11295 Assert(iGReg < 16);
11296
11297 PIEMCPU pIemCpu = &pVCpu->iem.s;
11298 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11299 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11300 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11301}
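/*
 * Usage sketch (hypothetical exit handler): the read variant takes the
 * destination general register first, so "mov eax, cr0" would be:
 *
 *      rcStrict = IEMExecDecodedMovCRxRead(pVCpu, 3, X86_GREG_xAX, 0);
 */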
11302
11303
11304/**
11305 * Interface for HM and EM to clear the CR0[TS] bit.
11306 *
11307 * @returns Strict VBox status code.
11308 * @param pVCpu The cross context per virtual CPU structure.
11309 * @param cbInstr The instruction length in bytes.
11310 *
11311 * @remarks In ring-0 not all of the state needs to be synced in.
11312 */
11313VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11314{
11315 AssertReturn(cbInstr - 2U <= 15U - 2U, VERR_IEM_INVALID_INSTR_LENGTH);
11316
11317 PIEMCPU pIemCpu = &pVCpu->iem.s;
11318 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11319 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11320 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11321}
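/*
 * Usage sketch (hypothetical exit handler): CLTS is two bytes (0F 06) when it
 * carries no prefixes, so a typical call would be:
 *
 *      rcStrict = IEMExecDecodedClts(pVCpu, 2);
 */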
11322
11323
11324/**
11325 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11326 *
11327 * @returns Strict VBox status code.
11328 * @param pVCpu The cross context per virtual CPU structure.
11329 * @param cbInstr The instruction length in bytes.
11330 * @param uValue The value to load into CR0.
11331 *
11332 * @remarks In ring-0 not all of the state needs to be synced in.
11333 */
11334VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11335{
11336 AssertReturn(cbInstr - 3U <= 15U - 3U, VERR_IEM_INVALID_INSTR_LENGTH);
11337
11338 PIEMCPU pIemCpu = &pVCpu->iem.s;
11339 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11340 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11341 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11342}
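/*
 * Usage sketch (hypothetical exit handler): uMsw below stands for the 16-bit
 * source operand already extracted by the caller (e.g. from the exit
 * qualification); the register form of LMSW is three bytes without prefixes:
 *
 *      rcStrict = IEMExecDecodedLmsw(pVCpu, 3, uMsw);
 */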
11343
11344
11345/**
11346 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11347 *
11348 * Takes its input values from ECX and EDX:EAX in the CPU context of the calling EMT.
11349 *
11350 * @returns Strict VBox status code.
11351 * @param pVCpu The cross context per virtual CPU structure of the
11352 * calling EMT.
11353 * @param cbInstr The instruction length in bytes.
11354 * @remarks In ring-0 not all of the state needs to be synced in.
11355 * @threads EMT(pVCpu)
11356 */
11357VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11358{
11359 AssertReturn(cbInstr - 3U <= 15U - 3U, VERR_IEM_INVALID_INSTR_LENGTH);
11360
11361 PIEMCPU pIemCpu = &pVCpu->iem.s;
11362 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11363 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11364 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11365}
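/*
 * Usage sketch (hypothetical exit handler): the XCR index and the new value
 * must already be present in the guest ECX and EDX:EAX; XSETBV itself is
 * three bytes (0F 01 D1) without prefixes:
 *
 *      rcStrict = IEMExecDecodedXsetbv(pVCpu, 3);
 */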
11366