VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@52726

Last change on this file since 52726 was 52726, checked in by vboxsync, 10 years ago

VMM/IEM: minor nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 420.2 KB
1/* $Id: IEMAll.cpp 52726 2014-09-12 14:05:45Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews on things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78#define IEM_IMPLEMENTS_TASKSWITCH
79
80/*******************************************************************************
81* Header Files *
82*******************************************************************************/
83#define LOG_GROUP LOG_GROUP_IEM
84#include <VBox/vmm/iem.h>
85#include <VBox/vmm/cpum.h>
86#include <VBox/vmm/pdm.h>
87#include <VBox/vmm/pgm.h>
88#include <internal/pgm.h>
89#include <VBox/vmm/iom.h>
90#include <VBox/vmm/em.h>
91#include <VBox/vmm/hm.h>
92#include <VBox/vmm/tm.h>
93#include <VBox/vmm/dbgf.h>
94#include <VBox/vmm/dbgftrace.h>
95#ifdef VBOX_WITH_RAW_MODE_NOT_R0
96# include <VBox/vmm/patm.h>
97# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
98# include <VBox/vmm/csam.h>
99# endif
100#endif
101#include "IEMInternal.h"
102#ifdef IEM_VERIFICATION_MODE_FULL
103# include <VBox/vmm/rem.h>
104# include <VBox/vmm/mm.h>
105#endif
106#include <VBox/vmm/vm.h>
107#include <VBox/log.h>
108#include <VBox/err.h>
109#include <VBox/param.h>
110#include <VBox/dis.h>
111#include <VBox/disopcode.h>
112#include <iprt/assert.h>
113#include <iprt/string.h>
114#include <iprt/x86.h>
115
116
117
118/*******************************************************************************
119* Structures and Typedefs *
120*******************************************************************************/
121/** @typedef PFNIEMOP
122 * Pointer to an opcode decoder function.
123 */
124
125/** @def FNIEMOP_DEF
126 * Define an opcode decoder function.
127 *
128 * We're using macros for this so that adding and removing parameters as well as
129 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
130 *
131 * @param a_Name The function name.
132 */
133
134
135#if defined(__GNUC__) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
143
144#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
145typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
152
153#elif defined(__GNUC__)
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#else
163typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
164# define FNIEMOP_DEF(a_Name) \
165 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
170
171#endif
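
/*
 * Illustrative sketch (not part of the build): how a decoder function is
 * declared with the FNIEMOP_DEF macro defined above.  The body is
 * hypothetical; the real decoder functions live in the instruction include
 * files.
 */
#if 0
FNIEMOP_DEF(iemOp_example_nop)
{
    /* A trivial handler just reports success; real handlers decode any
       remaining bytes and advance RIP before returning. */
    NOREF(pIemCpu);
    return VINF_SUCCESS;
}
#endif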
172
173
174/**
175 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
176 */
177typedef union IEMSELDESC
178{
179 /** The legacy view. */
180 X86DESC Legacy;
181 /** The long mode view. */
182 X86DESC64 Long;
183} IEMSELDESC;
184/** Pointer to a selector descriptor table entry. */
185typedef IEMSELDESC *PIEMSELDESC;
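
/*
 * Illustrative sketch (not part of the build): inspecting a descriptor
 * fetched into an IEMSELDESC.  The helper is hypothetical; field names
 * follow the X86DESC layout from iprt/x86.h.
 */
#if 0
static bool iemExampleIsDescPresent(PIEMSELDESC pDesc)
{
    /* Both views overlay the same first 8 bytes, so the legacy view is
       sufficient for checking the present bit. */
    return pDesc->Legacy.Gen.u1Present != 0;
}
#endif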
186
187
188/*******************************************************************************
189* Defined Constants And Macros *
190*******************************************************************************/
191/** @name IEM status codes.
192 *
193 * Not quite sure how this will play out in the end, just aliasing safe status
194 * codes for now.
195 *
196 * @{ */
197#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
198/** @} */
199
200/** Temporary hack to disable the double execution. Will be removed in favor
201 * of a dedicated execution mode in EM. */
202//#define IEM_VERIFICATION_MODE_NO_REM
203
204/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
205 * due to GCC lacking knowledge about the value range of a switch. */
206#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
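
/*
 * Illustrative sketch (not part of the build): typical use of
 * IEM_NOT_REACHED_DEFAULT_CASE_RET in an operand-size switch.  The function
 * is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleSwitchOnOpSize(PIEMCPU pIemCpu)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT: return VINF_SUCCESS;
        case IEMMODE_32BIT: return VINF_SUCCESS;
        case IEMMODE_64BIT: return VINF_SUCCESS;
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* silences the 'may be used uninitialized' class of warnings */
    }
}
#endif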
207
208/**
209 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
210 * occasion.
211 */
212#ifdef LOG_ENABLED
213# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
214 do { \
215 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
216 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
217 } while (0)
218#else
219# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
220 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
221#endif
222
223/**
224 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
225 * occasion using the supplied logger statement.
226 *
227 * @param a_LoggerArgs What to log on failure.
228 */
229#ifdef LOG_ENABLED
230# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
231 do { \
232 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
233 /*LogFunc(a_LoggerArgs);*/ \
234 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
235 } while (0)
236#else
237# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
238 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
239#endif
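
/*
 * Illustrative sketch (not part of the build): bailing out of an
 * unimplemented corner case with the macro above.  The function and the
 * gate type check are hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleCheckGateType(PIEMCPU pIemCpu, uint8_t uGateType)
{
    if (uGateType != X86_SEL_TYPE_SYS_386_INT_GATE)
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("gate type %#x", uGateType));
    NOREF(pIemCpu);
    return VINF_SUCCESS;
}
#endif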
240
241/**
242 * Call an opcode decoder function.
243 *
244 * We're using macros for this so that adding and removing parameters can be
245 * done as we please. See FNIEMOP_DEF.
246 */
247#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
248
249/**
250 * Call a common opcode decoder function taking one extra argument.
251 *
252 * We're using macros for this so that adding and removing parameters can be
253 * done as we please. See FNIEMOP_DEF_1.
254 */
255#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
256
257/**
258 * Call a common opcode decoder function taking two extra arguments.
259 *
260 * We're using macros for this so that adding and removing parameters can be
261 * done as we please. See FNIEMOP_DEF_1.
262 */
263#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
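
/*
 * Illustrative sketch (not part of the build): dispatching a fetched opcode
 * byte through the one-byte opcode map with FNIEMOP_CALL.  This is a
 * simplification of what the real decoder loop does; g_apfnOneByteMap and
 * IEM_OPCODE_GET_NEXT_U8 are declared/defined further down in this file.
 */
#if 0
static VBOXSTRICTRC iemExampleDispatchOneByte(PIEMCPU pIemCpu)
{
    uint8_t b;
    IEM_OPCODE_GET_NEXT_U8(&b);               /* returns the fetch status on failure */
    return FNIEMOP_CALL(g_apfnOneByteMap[b]); /* invoke the decoder for opcode b */
}
#endif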
264
265/**
266 * Check if we're currently executing in real or virtual 8086 mode.
267 *
268 * @returns @c true if it is, @c false if not.
269 * @param a_pIemCpu The IEM state of the current CPU.
270 */
271#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
272
273/**
274 * Check if we're currently executing in virtual 8086 mode.
275 *
276 * @returns @c true if it is, @c false if not.
277 * @param a_pIemCpu The IEM state of the current CPU.
278 */
279#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
280
281/**
282 * Check if we're currently executing in long mode.
283 *
284 * @returns @c true if it is, @c false if not.
285 * @param a_pIemCpu The IEM state of the current CPU.
286 */
287#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
288
289/**
290 * Check if we're currently executing in real mode.
291 *
292 * @returns @c true if it is, @c false if not.
293 * @param a_pIemCpu The IEM state of the current CPU.
294 */
295#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
296
297/**
298 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
299 */
300#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
301
302/**
303 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
304 */
305#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
306
307/**
308 * Tests if at least one of the specified AMD CPUID features (extended) is
309 * marked present.
310 */
311#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
312
313/**
314 * Checks if an Intel CPUID feature is present.
315 */
316#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
317 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
318 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
319
320/**
321 * Checks if an Intel CPUID feature is present.
322 */
323#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
324 ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) )
325
326/**
327 * Checks if an Intel CPUID feature is present in the host CPU.
328 */
329#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
330 ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
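
/*
 * Illustrative sketch (not part of the build): how a decoder might guard an
 * instruction on a CPUID feature bit using the macros above.  The function
 * is hypothetical and assumes the iemRaiseUndefinedOpcode helper defined
 * later in this file.
 */
#if 0
static VBOXSTRICTRC iemExampleRequireTsc(PIEMCPU pIemCpu)
{
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
        return iemRaiseUndefinedOpcode(pIemCpu); /* raise #UD if the guest CPU lacks TSC */
    return VINF_SUCCESS;
}
#endif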
331
332/**
333 * Evaluates to true if we're presenting an Intel CPU to the guest.
334 */
335#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
336
337/**
338 * Evaluates to true if we're presenting an AMD CPU to the guest.
339 */
340#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
341
342/**
343 * Check if the address is canonical.
344 */
345#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
346
347
348/*******************************************************************************
349* Global Variables *
350*******************************************************************************/
351extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
352
353
354/** Function table for the ADD instruction. */
355static const IEMOPBINSIZES g_iemAImpl_add =
356{
357 iemAImpl_add_u8, iemAImpl_add_u8_locked,
358 iemAImpl_add_u16, iemAImpl_add_u16_locked,
359 iemAImpl_add_u32, iemAImpl_add_u32_locked,
360 iemAImpl_add_u64, iemAImpl_add_u64_locked
361};
362
363/** Function table for the ADC instruction. */
364static const IEMOPBINSIZES g_iemAImpl_adc =
365{
366 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
367 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
368 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
369 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
370};
371
372/** Function table for the SUB instruction. */
373static const IEMOPBINSIZES g_iemAImpl_sub =
374{
375 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
376 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
377 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
378 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
379};
380
381/** Function table for the SBB instruction. */
382static const IEMOPBINSIZES g_iemAImpl_sbb =
383{
384 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
385 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
386 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
387 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
388};
389
390/** Function table for the OR instruction. */
391static const IEMOPBINSIZES g_iemAImpl_or =
392{
393 iemAImpl_or_u8, iemAImpl_or_u8_locked,
394 iemAImpl_or_u16, iemAImpl_or_u16_locked,
395 iemAImpl_or_u32, iemAImpl_or_u32_locked,
396 iemAImpl_or_u64, iemAImpl_or_u64_locked
397};
398
399/** Function table for the XOR instruction. */
400static const IEMOPBINSIZES g_iemAImpl_xor =
401{
402 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
403 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
404 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
405 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
406};
407
408/** Function table for the AND instruction. */
409static const IEMOPBINSIZES g_iemAImpl_and =
410{
411 iemAImpl_and_u8, iemAImpl_and_u8_locked,
412 iemAImpl_and_u16, iemAImpl_and_u16_locked,
413 iemAImpl_and_u32, iemAImpl_and_u32_locked,
414 iemAImpl_and_u64, iemAImpl_and_u64_locked
415};
416
417/** Function table for the CMP instruction.
418 * @remarks Making operand order ASSUMPTIONS.
419 */
420static const IEMOPBINSIZES g_iemAImpl_cmp =
421{
422 iemAImpl_cmp_u8, NULL,
423 iemAImpl_cmp_u16, NULL,
424 iemAImpl_cmp_u32, NULL,
425 iemAImpl_cmp_u64, NULL
426};
427
428/** Function table for the TEST instruction.
429 * @remarks Making operand order ASSUMPTIONS.
430 */
431static const IEMOPBINSIZES g_iemAImpl_test =
432{
433 iemAImpl_test_u8, NULL,
434 iemAImpl_test_u16, NULL,
435 iemAImpl_test_u32, NULL,
436 iemAImpl_test_u64, NULL
437};
438
439/** Function table for the BT instruction. */
440static const IEMOPBINSIZES g_iemAImpl_bt =
441{
442 NULL, NULL,
443 iemAImpl_bt_u16, NULL,
444 iemAImpl_bt_u32, NULL,
445 iemAImpl_bt_u64, NULL
446};
447
448/** Function table for the BTC instruction. */
449static const IEMOPBINSIZES g_iemAImpl_btc =
450{
451 NULL, NULL,
452 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
453 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
454 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
455};
456
457/** Function table for the BTR instruction. */
458static const IEMOPBINSIZES g_iemAImpl_btr =
459{
460 NULL, NULL,
461 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
462 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
463 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
464};
465
466/** Function table for the BTS instruction. */
467static const IEMOPBINSIZES g_iemAImpl_bts =
468{
469 NULL, NULL,
470 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
471 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
472 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
473};
474
475/** Function table for the BSF instruction. */
476static const IEMOPBINSIZES g_iemAImpl_bsf =
477{
478 NULL, NULL,
479 iemAImpl_bsf_u16, NULL,
480 iemAImpl_bsf_u32, NULL,
481 iemAImpl_bsf_u64, NULL
482};
483
484/** Function table for the BSR instruction. */
485static const IEMOPBINSIZES g_iemAImpl_bsr =
486{
487 NULL, NULL,
488 iemAImpl_bsr_u16, NULL,
489 iemAImpl_bsr_u32, NULL,
490 iemAImpl_bsr_u64, NULL
491};
492
493/** Function table for the IMUL instruction. */
494static const IEMOPBINSIZES g_iemAImpl_imul_two =
495{
496 NULL, NULL,
497 iemAImpl_imul_two_u16, NULL,
498 iemAImpl_imul_two_u32, NULL,
499 iemAImpl_imul_two_u64, NULL
500};
501
502/** Group 1 /r lookup table. */
503static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
504{
505 &g_iemAImpl_add,
506 &g_iemAImpl_or,
507 &g_iemAImpl_adc,
508 &g_iemAImpl_sbb,
509 &g_iemAImpl_and,
510 &g_iemAImpl_sub,
511 &g_iemAImpl_xor,
512 &g_iemAImpl_cmp
513};
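
/*
 * Illustrative sketch (not part of the build): picking the group 1
 * implementation from the reg field of a ModR/M byte.  The wrapper is
 * hypothetical; the shift/mask macros come from iprt/x86.h.
 */
#if 0
static PCIEMOPBINSIZES iemExampleGrp1Lookup(uint8_t bRm)
{
    /* /0=ADD /1=OR /2=ADC /3=SBB /4=AND /5=SUB /6=XOR /7=CMP */
    return g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
}
#endif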
514
515/** Function table for the INC instruction. */
516static const IEMOPUNARYSIZES g_iemAImpl_inc =
517{
518 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
519 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
520 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
521 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
522};
523
524/** Function table for the DEC instruction. */
525static const IEMOPUNARYSIZES g_iemAImpl_dec =
526{
527 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
528 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
529 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
530 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
531};
532
533/** Function table for the NEG instruction. */
534static const IEMOPUNARYSIZES g_iemAImpl_neg =
535{
536 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
537 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
538 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
539 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
540};
541
542/** Function table for the NOT instruction. */
543static const IEMOPUNARYSIZES g_iemAImpl_not =
544{
545 iemAImpl_not_u8, iemAImpl_not_u8_locked,
546 iemAImpl_not_u16, iemAImpl_not_u16_locked,
547 iemAImpl_not_u32, iemAImpl_not_u32_locked,
548 iemAImpl_not_u64, iemAImpl_not_u64_locked
549};
550
551
552/** Function table for the ROL instruction. */
553static const IEMOPSHIFTSIZES g_iemAImpl_rol =
554{
555 iemAImpl_rol_u8,
556 iemAImpl_rol_u16,
557 iemAImpl_rol_u32,
558 iemAImpl_rol_u64
559};
560
561/** Function table for the ROR instruction. */
562static const IEMOPSHIFTSIZES g_iemAImpl_ror =
563{
564 iemAImpl_ror_u8,
565 iemAImpl_ror_u16,
566 iemAImpl_ror_u32,
567 iemAImpl_ror_u64
568};
569
570/** Function table for the RCL instruction. */
571static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
572{
573 iemAImpl_rcl_u8,
574 iemAImpl_rcl_u16,
575 iemAImpl_rcl_u32,
576 iemAImpl_rcl_u64
577};
578
579/** Function table for the RCR instruction. */
580static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
581{
582 iemAImpl_rcr_u8,
583 iemAImpl_rcr_u16,
584 iemAImpl_rcr_u32,
585 iemAImpl_rcr_u64
586};
587
588/** Function table for the SHL instruction. */
589static const IEMOPSHIFTSIZES g_iemAImpl_shl =
590{
591 iemAImpl_shl_u8,
592 iemAImpl_shl_u16,
593 iemAImpl_shl_u32,
594 iemAImpl_shl_u64
595};
596
597/** Function table for the SHR instruction. */
598static const IEMOPSHIFTSIZES g_iemAImpl_shr =
599{
600 iemAImpl_shr_u8,
601 iemAImpl_shr_u16,
602 iemAImpl_shr_u32,
603 iemAImpl_shr_u64
604};
605
606/** Function table for the SAR instruction. */
607static const IEMOPSHIFTSIZES g_iemAImpl_sar =
608{
609 iemAImpl_sar_u8,
610 iemAImpl_sar_u16,
611 iemAImpl_sar_u32,
612 iemAImpl_sar_u64
613};
614
615
616/** Function table for the MUL instruction. */
617static const IEMOPMULDIVSIZES g_iemAImpl_mul =
618{
619 iemAImpl_mul_u8,
620 iemAImpl_mul_u16,
621 iemAImpl_mul_u32,
622 iemAImpl_mul_u64
623};
624
625/** Function table for the IMUL instruction working implicitly on rAX. */
626static const IEMOPMULDIVSIZES g_iemAImpl_imul =
627{
628 iemAImpl_imul_u8,
629 iemAImpl_imul_u16,
630 iemAImpl_imul_u32,
631 iemAImpl_imul_u64
632};
633
634/** Function table for the DIV instruction. */
635static const IEMOPMULDIVSIZES g_iemAImpl_div =
636{
637 iemAImpl_div_u8,
638 iemAImpl_div_u16,
639 iemAImpl_div_u32,
640 iemAImpl_div_u64
641};
642
643/** Function table for the IDIV instruction. */
644static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
645{
646 iemAImpl_idiv_u8,
647 iemAImpl_idiv_u16,
648 iemAImpl_idiv_u32,
649 iemAImpl_idiv_u64
650};
651
652/** Function table for the SHLD instruction */
653static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
654{
655 iemAImpl_shld_u16,
656 iemAImpl_shld_u32,
657 iemAImpl_shld_u64,
658};
659
660/** Function table for the SHRD instruction */
661static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
662{
663 iemAImpl_shrd_u16,
664 iemAImpl_shrd_u32,
665 iemAImpl_shrd_u64,
666};
667
668
669/** Function table for the PUNPCKLBW instruction */
670static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
671/** Function table for the PUNPCKLWD instruction */
672static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
673/** Function table for the PUNPCKLDQ instruction */
674static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
675/** Function table for the PUNPCKLQDQ instruction */
676static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
677
678/** Function table for the PUNPCKHBW instruction */
679static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
680/** Function table for the PUNPCKHWD instruction */
681static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
682/** Function table for the PUNPCKHDQ instruction */
683static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
684/** Function table for the PUNPCKHQDQ instruction */
685static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
686
687/** Function table for the PXOR instruction */
688static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
689/** Function table for the PCMPEQB instruction */
690static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
691/** Function table for the PCMPEQW instruction */
692static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
693/** Function table for the PCMPEQD instruction */
694static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
695
696
697#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
698/** What IEM just wrote. */
699uint8_t g_abIemWrote[256];
700/** How much IEM just wrote. */
701size_t g_cbIemWrote;
702#endif
703
704
705/*******************************************************************************
706* Internal Functions *
707*******************************************************************************/
708static VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
709static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
710static VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
711static VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
712/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
713static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
714static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
715static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
716static VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
717static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
718static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
719static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
720static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
721static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
722static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
723static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
724static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
725static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
726static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
727static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
728static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
729static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
730static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
731static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
732static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
733static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
734static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
735static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
736static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
737static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
738static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
739static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
740static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
741
742#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
743static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
744#endif
745static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
746static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
747
748
749
750/**
751 * Sets the pass up status.
752 *
753 * @returns VINF_SUCCESS.
754 * @param pIemCpu The per CPU IEM state of the calling thread.
755 * @param rcPassUp The pass up status. Must be informational.
756 * VINF_SUCCESS is not allowed.
757 */
758static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
759{
760 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
761
762 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
763 if (rcOldPassUp == VINF_SUCCESS)
764 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
765 /* If both are EM scheduling codes, use EM priority rules. */
766 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
767 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
768 {
769 if (rcPassUp < rcOldPassUp)
770 {
771 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
772 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
773 }
774 else
775 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
776 }
777 /* Override EM scheduling with specific status code. */
778 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
779 {
780 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
781 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
782 }
783 /* Don't override specific status code, first come first served. */
784 else
785 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
786 return VINF_SUCCESS;
787}
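
/*
 * Illustrative sketch (not part of the build): how a caller might merge an
 * informational status into the pass-up slot and keep executing.  The
 * wrapper is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleMergeStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
{
    if (rcStrict != VINF_SUCCESS && RT_SUCCESS(VBOXSTRICTRC_VAL(rcStrict)))
        return iemSetPassUpStatus(pIemCpu, rcStrict); /* remember it, report success */
    return rcStrict;
}
#endif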
788
789
790/**
791 * Initializes the execution state.
792 *
793 * @param pIemCpu The per CPU IEM state.
794 * @param fBypassHandlers Whether to bypass access handlers.
795 */
796DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
797{
798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
799 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
800
801#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
809 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
810#endif
811
812#ifdef VBOX_WITH_RAW_MODE_NOT_R0
813 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
814#endif
815 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
816 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
817 ? IEMMODE_64BIT
818 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
819 ? IEMMODE_32BIT
820 : IEMMODE_16BIT;
821 pIemCpu->enmCpuMode = enmMode;
822#ifdef VBOX_STRICT
823 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
824 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
825 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
826 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
827 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
828 pIemCpu->uRexReg = 127;
829 pIemCpu->uRexB = 127;
830 pIemCpu->uRexIndex = 127;
831 pIemCpu->iEffSeg = 127;
832 pIemCpu->offOpcode = 127;
833 pIemCpu->cbOpcode = 127;
834#endif
835
836 pIemCpu->cActiveMappings = 0;
837 pIemCpu->iNextMapping = 0;
838 pIemCpu->rcPassUp = VINF_SUCCESS;
839 pIemCpu->fBypassHandlers = fBypassHandlers;
840#ifdef VBOX_WITH_RAW_MODE_NOT_R0
841 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
842 && pCtx->cs.u64Base == 0
843 && pCtx->cs.u32Limit == UINT32_MAX
844 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
845 if (!pIemCpu->fInPatchCode)
846 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
847#endif
848}
849
850
851/**
852 * Initializes the decoder state.
853 *
854 * @param pIemCpu The per CPU IEM state.
855 * @param fBypassHandlers Whether to bypass access handlers.
856 */
857DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
858{
859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
860 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
861
862#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
863 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
864 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
865 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
866 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
867 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
868 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
869 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
870 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
871#endif
872
873#ifdef VBOX_WITH_RAW_MODE_NOT_R0
874 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
875#endif
876 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
877#ifdef IEM_VERIFICATION_MODE_FULL
878 if (pIemCpu->uInjectCpl != UINT8_MAX)
879 pIemCpu->uCpl = pIemCpu->uInjectCpl;
880#endif
881 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
882 ? IEMMODE_64BIT
883 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
884 ? IEMMODE_32BIT
885 : IEMMODE_16BIT;
886 pIemCpu->enmCpuMode = enmMode;
887 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
888 pIemCpu->enmEffAddrMode = enmMode;
889 if (enmMode != IEMMODE_64BIT)
890 {
891 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
892 pIemCpu->enmEffOpSize = enmMode;
893 }
894 else
895 {
896 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
897 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
898 }
899 pIemCpu->fPrefixes = 0;
900 pIemCpu->uRexReg = 0;
901 pIemCpu->uRexB = 0;
902 pIemCpu->uRexIndex = 0;
903 pIemCpu->iEffSeg = X86_SREG_DS;
904 pIemCpu->offOpcode = 0;
905 pIemCpu->cbOpcode = 0;
906 pIemCpu->cActiveMappings = 0;
907 pIemCpu->iNextMapping = 0;
908 pIemCpu->rcPassUp = VINF_SUCCESS;
909 pIemCpu->fBypassHandlers = fBypassHandlers;
910#ifdef VBOX_WITH_RAW_MODE_NOT_R0
911 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
912 && pCtx->cs.u64Base == 0
913 && pCtx->cs.u32Limit == UINT32_MAX
914 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
915 if (!pIemCpu->fInPatchCode)
916 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
917#endif
918
919#ifdef DBGFTRACE_ENABLED
920 switch (enmMode)
921 {
922 case IEMMODE_64BIT:
923 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
924 break;
925 case IEMMODE_32BIT:
926 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
927 break;
928 case IEMMODE_16BIT:
929 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
930 break;
931 }
932#endif
933}
934
935
936/**
937 * Prefetches opcodes the first time, i.e. when starting execution.
938 *
939 * @returns Strict VBox status code.
940 * @param pIemCpu The IEM state.
941 * @param fBypassHandlers Whether to bypass access handlers.
942 */
943static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
944{
945#ifdef IEM_VERIFICATION_MODE_FULL
946 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
947#endif
948 iemInitDecoder(pIemCpu, fBypassHandlers);
949
950 /*
951 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
952 *
953 * First translate CS:rIP to a physical address.
954 */
955 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
956 uint32_t cbToTryRead;
957 RTGCPTR GCPtrPC;
958 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
959 {
960 cbToTryRead = PAGE_SIZE;
961 GCPtrPC = pCtx->rip;
962 if (!IEM_IS_CANONICAL(GCPtrPC))
963 return iemRaiseGeneralProtectionFault0(pIemCpu);
964 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
965 }
966 else
967 {
968 uint32_t GCPtrPC32 = pCtx->eip;
969 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
970 if (GCPtrPC32 > pCtx->cs.u32Limit)
971 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
972 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
973 if (!cbToTryRead) /* overflowed */
974 {
975 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
976 cbToTryRead = UINT32_MAX;
977 }
978 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
979 Assert(GCPtrPC <= UINT32_MAX);
980 }
981
982#ifdef VBOX_WITH_RAW_MODE_NOT_R0
983 /* Allow interpretation of patch manager code blocks since they can for
984 instance throw #PFs for perfectly good reasons. */
985 if (pIemCpu->fInPatchCode)
986 {
987 size_t cbRead = 0;
988 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
989 AssertRCReturn(rc, rc);
990 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
991 return VINF_SUCCESS;
992 }
993#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
994
995 RTGCPHYS GCPhys;
996 uint64_t fFlags;
997 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
998 if (RT_FAILURE(rc))
999 {
1000 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1001 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1002 }
1003 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1004 {
1005 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1006 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1007 }
1008 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1009 {
1010 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1011 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1012 }
1013 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1014 /** @todo Check reserved bits and such stuff. PGM is better at doing
1015 * that, so do it when implementing the guest virtual address
1016 * TLB... */
1017
1018#ifdef IEM_VERIFICATION_MODE_FULL
1019 /*
1020 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1021 * instruction.
1022 */
1023 /** @todo optimize this differently by not using PGMPhysRead. */
1024 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1025 pIemCpu->GCPhysOpcodes = GCPhys;
1026 if ( offPrevOpcodes < cbOldOpcodes
1027 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1028 {
1029 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1030 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1031 pIemCpu->cbOpcode = cbNew;
1032 return VINF_SUCCESS;
1033 }
1034#endif
1035
1036 /*
1037 * Read the bytes at this address.
1038 */
1039 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1040#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1041 size_t cbActual;
1042 if ( PATMIsEnabled(pVM)
1043 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1044 {
1045 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1046 Assert(cbActual > 0);
1047 pIemCpu->cbOpcode = (uint8_t)cbActual;
1048 }
1049 else
1050#endif
1051 {
1052 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1053 if (cbToTryRead > cbLeftOnPage)
1054 cbToTryRead = cbLeftOnPage;
1055 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1056 cbToTryRead = sizeof(pIemCpu->abOpcode);
1057
1058 if (!pIemCpu->fBypassHandlers)
1059 rc = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead);
1060 else
1061 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1062 if (rc != VINF_SUCCESS)
1063 {
1064 /** @todo status code handling */
1065 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1066 GCPtrPC, GCPhys, rc, cbToTryRead));
1067 return rc;
1068 }
1069 pIemCpu->cbOpcode = cbToTryRead;
1070 }
1071
1072 return VINF_SUCCESS;
1073}
1074
1075
1076/**
1077 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1078 * exception if it fails.
1079 *
1080 * @returns Strict VBox status code.
1081 * @param pIemCpu The IEM state.
1082 * @param cbMin The minimum number of bytes relative to offOpcode
1083 * that must be read.
1084 */
1085static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1086{
1087 /*
1088 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1089 *
1090 * First translate CS:rIP to a physical address.
1091 */
1092 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1093 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1094 uint32_t cbToTryRead;
1095 RTGCPTR GCPtrNext;
1096 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1097 {
1098 cbToTryRead = PAGE_SIZE;
1099 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1100 if (!IEM_IS_CANONICAL(GCPtrNext))
1101 return iemRaiseGeneralProtectionFault0(pIemCpu);
1102 }
1103 else
1104 {
1105 uint32_t GCPtrNext32 = pCtx->eip;
1106 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1107 GCPtrNext32 += pIemCpu->cbOpcode;
1108 if (GCPtrNext32 > pCtx->cs.u32Limit)
1109 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1110 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1111 if (!cbToTryRead) /* overflowed */
1112 {
1113 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1114 cbToTryRead = UINT32_MAX;
1115 /** @todo check out wrapping around the code segment. */
1116 }
1117 if (cbToTryRead < cbMin - cbLeft)
1118 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1119 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1120 }
1121
1122 /* Only read up to the end of the page, and make sure we don't read more
1123 than the opcode buffer can hold. */
1124 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1125 if (cbToTryRead > cbLeftOnPage)
1126 cbToTryRead = cbLeftOnPage;
1127 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1128 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1129 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1130
1131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1132 /* Allow interpretation of patch manager code blocks since they can for
1133 instance throw #PFs for perfectly good reasons. */
1134 if (pIemCpu->fInPatchCode)
1135 {
1136 size_t cbRead = 0;
1137 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1138 AssertRCReturn(rc, rc);
1139 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1140 return VINF_SUCCESS;
1141 }
1142#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1143
1144 RTGCPHYS GCPhys;
1145 uint64_t fFlags;
1146 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1147 if (RT_FAILURE(rc))
1148 {
1149 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1150 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1151 }
1152 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1153 {
1154 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1155 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1156 }
1157 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1158 {
1159 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1160 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1161 }
1162 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1163 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1164 /** @todo Check reserved bits and such stuff. PGM is better at doing
1165 * that, so do it when implementing the guest virtual address
1166 * TLB... */
1167
1168 /*
1169 * Read the bytes at this address.
1170 *
1171 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1172 * and since PATM should only patch the start of an instruction there
1173 * should be no need to check again here.
1174 */
1175 if (!pIemCpu->fBypassHandlers)
1176 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
1177 else
1178 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1179 if (rc != VINF_SUCCESS)
1180 {
1181 /** @todo status code handling */
1182 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1183 return rc;
1184 }
1185 pIemCpu->cbOpcode += cbToTryRead;
1186 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1187
1188 return VINF_SUCCESS;
1189}
1190
1191
1192/**
1193 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1194 *
1195 * @returns Strict VBox status code.
1196 * @param pIemCpu The IEM state.
1197 * @param pb Where to return the opcode byte.
1198 */
1199DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1200{
1201 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1202 if (rcStrict == VINF_SUCCESS)
1203 {
1204 uint8_t offOpcode = pIemCpu->offOpcode;
1205 *pb = pIemCpu->abOpcode[offOpcode];
1206 pIemCpu->offOpcode = offOpcode + 1;
1207 }
1208 else
1209 *pb = 0;
1210 return rcStrict;
1211}
1212
1213
1214/**
1215 * Fetches the next opcode byte.
1216 *
1217 * @returns Strict VBox status code.
1218 * @param pIemCpu The IEM state.
1219 * @param pu8 Where to return the opcode byte.
1220 */
1221DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1222{
1223 uint8_t const offOpcode = pIemCpu->offOpcode;
1224 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1225 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1226
1227 *pu8 = pIemCpu->abOpcode[offOpcode];
1228 pIemCpu->offOpcode = offOpcode + 1;
1229 return VINF_SUCCESS;
1230}
1231
1232
1233/**
1234 * Fetches the next opcode byte, returns automatically on failure.
1235 *
1236 * @param a_pu8 Where to return the opcode byte.
1237 * @remark Implicitly references pIemCpu.
1238 */
1239#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1240 do \
1241 { \
1242 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1243 if (rcStrict2 != VINF_SUCCESS) \
1244 return rcStrict2; \
1245 } while (0)
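
/*
 * Illustrative sketch (not part of the build): a decoder fetching its
 * ModR/M byte with IEM_OPCODE_GET_NEXT_U8.  The handler is hypothetical.
 */
#if 0
FNIEMOP_DEF(iemOp_example_modrm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm); /* returns the fetch status on failure */
    NOREF(bRm);
    return VINF_SUCCESS;
}
#endif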
1246
1247
1248/**
1249 * Fetches the next signed byte from the opcode stream.
1250 *
1251 * @returns Strict VBox status code.
1252 * @param pIemCpu The IEM state.
1253 * @param pi8 Where to return the signed byte.
1254 */
1255DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1256{
1257 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1258}
1259
1260
1261/**
1262 * Fetches the next signed byte from the opcode stream, returning automatically
1263 * on failure.
1264 *
1265 * @param pi8 Where to return the signed byte.
1266 * @remark Implicitly references pIemCpu.
1267 */
1268#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1269 do \
1270 { \
1271 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1272 if (rcStrict2 != VINF_SUCCESS) \
1273 return rcStrict2; \
1274 } while (0)
1275
1276
1277/**
1278 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1279 *
1280 * @returns Strict VBox status code.
1281 * @param pIemCpu The IEM state.
1282 * @param pu16 Where to return the opcode word.
1283 */
1284DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1285{
1286 uint8_t u8;
1287 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1288 if (rcStrict == VINF_SUCCESS)
1289 *pu16 = (int8_t)u8;
1290 return rcStrict;
1291}
1292
1293
1294/**
1295 * Fetches the next signed byte from the opcode stream, extending it to
1296 * unsigned 16-bit.
1297 *
1298 * @returns Strict VBox status code.
1299 * @param pIemCpu The IEM state.
1300 * @param pu16 Where to return the unsigned word.
1301 */
1302DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1303{
1304 uint8_t const offOpcode = pIemCpu->offOpcode;
1305 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1306 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1307
1308 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1309 pIemCpu->offOpcode = offOpcode + 1;
1310 return VINF_SUCCESS;
1311}
1312
1313
1314/**
1315 * Fetches the next signed byte from the opcode stream and sign-extends it to
1316 * a word, returning automatically on failure.
1317 *
1318 * @param pu16 Where to return the word.
1319 * @remark Implicitly references pIemCpu.
1320 */
1321#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1322 do \
1323 { \
1324 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1325 if (rcStrict2 != VINF_SUCCESS) \
1326 return rcStrict2; \
1327 } while (0)
1328
1329
1330/**
1331 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1332 *
1333 * @returns Strict VBox status code.
1334 * @param pIemCpu The IEM state.
1335 * @param pu32 Where to return the opcode dword.
1336 */
1337DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1338{
1339 uint8_t u8;
1340 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1341 if (rcStrict == VINF_SUCCESS)
1342 *pu32 = (int8_t)u8;
1343 return rcStrict;
1344}
1345
1346
1347/**
1348 * Fetches the next signed byte from the opcode stream, extending it to
1349 * unsigned 32-bit.
1350 *
1351 * @returns Strict VBox status code.
1352 * @param pIemCpu The IEM state.
1353 * @param pu32 Where to return the unsigned dword.
1354 */
1355DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1356{
1357 uint8_t const offOpcode = pIemCpu->offOpcode;
1358 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1359 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1360
1361 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1362 pIemCpu->offOpcode = offOpcode + 1;
1363 return VINF_SUCCESS;
1364}
1365
1366
1367/**
1368 * Fetches the next signed byte from the opcode stream and sign-extends it to
1369 * a double word, returning automatically on failure.
1370 *
1371 * @param pu32 Where to return the double word.
1372 * @remark Implicitly references pIemCpu.
1373 */
1374#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1375 do \
1376 { \
1377 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1378 if (rcStrict2 != VINF_SUCCESS) \
1379 return rcStrict2; \
1380 } while (0)
1381
1382
1383/**
1384 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1385 *
1386 * @returns Strict VBox status code.
1387 * @param pIemCpu The IEM state.
1388 * @param pu64 Where to return the opcode qword.
1389 */
1390DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1391{
1392 uint8_t u8;
1393 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1394 if (rcStrict == VINF_SUCCESS)
1395 *pu64 = (int8_t)u8;
1396 return rcStrict;
1397}
1398
1399
1400/**
1401 * Fetches the next signed byte from the opcode stream, extending it to
1402 * unsigned 64-bit.
1403 *
1404 * @returns Strict VBox status code.
1405 * @param pIemCpu The IEM state.
1406 * @param pu64 Where to return the unsigned qword.
1407 */
1408DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1409{
1410 uint8_t const offOpcode = pIemCpu->offOpcode;
1411 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1412 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1413
1414 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1415 pIemCpu->offOpcode = offOpcode + 1;
1416 return VINF_SUCCESS;
1417}
1418
1419
1420/**
1421 * Fetches the next signed byte from the opcode stream and sign-extends it to
1422 * a quad word, returning automatically on failure.
1423 *
1424 * @param pu64 Where to return the quad word.
1425 * @remark Implicitly references pIemCpu.
1426 */
1427#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1428 do \
1429 { \
1430 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1431 if (rcStrict2 != VINF_SUCCESS) \
1432 return rcStrict2; \
1433 } while (0)
1434
1435
1436/**
1437 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1438 *
1439 * @returns Strict VBox status code.
1440 * @param pIemCpu The IEM state.
1441 * @param pu16 Where to return the opcode word.
1442 */
1443DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1444{
1445 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1446 if (rcStrict == VINF_SUCCESS)
1447 {
1448 uint8_t offOpcode = pIemCpu->offOpcode;
1449 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1450 pIemCpu->offOpcode = offOpcode + 2;
1451 }
1452 else
1453 *pu16 = 0;
1454 return rcStrict;
1455}
1456
1457
1458/**
1459 * Fetches the next opcode word.
1460 *
1461 * @returns Strict VBox status code.
1462 * @param pIemCpu The IEM state.
1463 * @param pu16 Where to return the opcode word.
1464 */
1465DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1466{
1467 uint8_t const offOpcode = pIemCpu->offOpcode;
1468 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1469 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1470
1471 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1472 pIemCpu->offOpcode = offOpcode + 2;
1473 return VINF_SUCCESS;
1474}
1475
1476
1477/**
1478 * Fetches the next opcode word, returns automatically on failure.
1479 *
1480 * @param a_pu16 Where to return the opcode word.
1481 * @remark Implicitly references pIemCpu.
1482 */
1483#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1484 do \
1485 { \
1486 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1487 if (rcStrict2 != VINF_SUCCESS) \
1488 return rcStrict2; \
1489 } while (0)
1490
1491
1492/**
1493 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1494 *
1495 * @returns Strict VBox status code.
1496 * @param pIemCpu The IEM state.
1497 * @param pu32 Where to return the opcode double word.
1498 */
1499DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1500{
1501 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1502 if (rcStrict == VINF_SUCCESS)
1503 {
1504 uint8_t offOpcode = pIemCpu->offOpcode;
1505 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1506 pIemCpu->offOpcode = offOpcode + 2;
1507 }
1508 else
1509 *pu32 = 0;
1510 return rcStrict;
1511}
1512
1513
1514/**
1515 * Fetches the next opcode word, zero extending it to a double word.
1516 *
1517 * @returns Strict VBox status code.
1518 * @param pIemCpu The IEM state.
1519 * @param pu32 Where to return the opcode double word.
1520 */
1521DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1522{
1523 uint8_t const offOpcode = pIemCpu->offOpcode;
1524 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1525 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1526
1527 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1528 pIemCpu->offOpcode = offOpcode + 2;
1529 return VINF_SUCCESS;
1530}
1531
1532
1533/**
1534 * Fetches the next opcode word and zero extends it to a double word, returns
1535 * automatically on failure.
1536 *
1537 * @param a_pu32 Where to return the opcode double word.
1538 * @remark Implicitly references pIemCpu.
1539 */
1540#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1541 do \
1542 { \
1543 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1544 if (rcStrict2 != VINF_SUCCESS) \
1545 return rcStrict2; \
1546 } while (0)
1547
1548
1549/**
1550 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1551 *
1552 * @returns Strict VBox status code.
1553 * @param pIemCpu The IEM state.
1554 * @param pu64 Where to return the opcode quad word.
1555 */
1556DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1557{
1558 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1559 if (rcStrict == VINF_SUCCESS)
1560 {
1561 uint8_t offOpcode = pIemCpu->offOpcode;
1562 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1563 pIemCpu->offOpcode = offOpcode + 2;
1564 }
1565 else
1566 *pu64 = 0;
1567 return rcStrict;
1568}
1569
1570
1571/**
1572 * Fetches the next opcode word, zero extending it to a quad word.
1573 *
1574 * @returns Strict VBox status code.
1575 * @param pIemCpu The IEM state.
1576 * @param pu64 Where to return the opcode quad word.
1577 */
1578DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1579{
1580 uint8_t const offOpcode = pIemCpu->offOpcode;
1581 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1582 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1583
1584 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1585 pIemCpu->offOpcode = offOpcode + 2;
1586 return VINF_SUCCESS;
1587}
1588
1589
1590/**
1591 * Fetches the next opcode word and zero extends it to a quad word, returns
1592 * automatically on failure.
1593 *
1594 * @param a_pu64 Where to return the opcode quad word.
1595 * @remark Implicitly references pIemCpu.
1596 */
1597#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1598 do \
1599 { \
1600 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1601 if (rcStrict2 != VINF_SUCCESS) \
1602 return rcStrict2; \
1603 } while (0)
1604
1605
1606/**
1607 * Fetches the next signed word from the opcode stream.
1608 *
1609 * @returns Strict VBox status code.
1610 * @param pIemCpu The IEM state.
1611 * @param pi16 Where to return the signed word.
1612 */
1613DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1614{
1615 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1616}
1617
1618
1619/**
1620 * Fetches the next signed word from the opcode stream, returning automatically
1621 * on failure.
1622 *
1623 * @param a_pi16 Where to return the signed word.
1624 * @remark Implicitly references pIemCpu.
1625 */
1626#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1627 do \
1628 { \
1629 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1630 if (rcStrict2 != VINF_SUCCESS) \
1631 return rcStrict2; \
1632 } while (0)
1633
1634
1635/**
1636 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1637 *
1638 * @returns Strict VBox status code.
1639 * @param pIemCpu The IEM state.
1640 * @param pu32 Where to return the opcode dword.
1641 */
1642DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1643{
1644 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1645 if (rcStrict == VINF_SUCCESS)
1646 {
1647 uint8_t offOpcode = pIemCpu->offOpcode;
1648 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1649 pIemCpu->abOpcode[offOpcode + 1],
1650 pIemCpu->abOpcode[offOpcode + 2],
1651 pIemCpu->abOpcode[offOpcode + 3]);
1652 pIemCpu->offOpcode = offOpcode + 4;
1653 }
1654 else
1655 *pu32 = 0;
1656 return rcStrict;
1657}
1658
1659
1660/**
1661 * Fetches the next opcode dword.
1662 *
1663 * @returns Strict VBox status code.
1664 * @param pIemCpu The IEM state.
1665 * @param pu32 Where to return the opcode double word.
1666 */
1667DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1668{
1669 uint8_t const offOpcode = pIemCpu->offOpcode;
1670 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1671 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1672
1673 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1674 pIemCpu->abOpcode[offOpcode + 1],
1675 pIemCpu->abOpcode[offOpcode + 2],
1676 pIemCpu->abOpcode[offOpcode + 3]);
1677 pIemCpu->offOpcode = offOpcode + 4;
1678 return VINF_SUCCESS;
1679}
1680
1681
1682/**
1683 * Fetches the next opcode dword, returns automatically on failure.
1684 *
1685 * @param a_pu32 Where to return the opcode dword.
1686 * @remark Implicitly references pIemCpu.
1687 */
1688#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1689 do \
1690 { \
1691 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1692 if (rcStrict2 != VINF_SUCCESS) \
1693 return rcStrict2; \
1694 } while (0)
1695
1696
1697/**
1698 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1699 *
1700 * @returns Strict VBox status code.
1701 * @param pIemCpu The IEM state.
1702 * @param pu64 Where to return the opcode dword, zero extended to 64 bits.
1703 */
1704DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1705{
1706 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1707 if (rcStrict == VINF_SUCCESS)
1708 {
1709 uint8_t offOpcode = pIemCpu->offOpcode;
1710 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1711 pIemCpu->abOpcode[offOpcode + 1],
1712 pIemCpu->abOpcode[offOpcode + 2],
1713 pIemCpu->abOpcode[offOpcode + 3]);
1714 pIemCpu->offOpcode = offOpcode + 4;
1715 }
1716 else
1717 *pu64 = 0;
1718 return rcStrict;
1719}
1720
1721
1722/**
1723 * Fetches the next opcode dword, zero extending it to a quad word.
1724 *
1725 * @returns Strict VBox status code.
1726 * @param pIemCpu The IEM state.
1727 * @param pu64 Where to return the opcode quad word.
1728 */
1729DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1730{
1731 uint8_t const offOpcode = pIemCpu->offOpcode;
1732 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1733 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1734
1735 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1736 pIemCpu->abOpcode[offOpcode + 1],
1737 pIemCpu->abOpcode[offOpcode + 2],
1738 pIemCpu->abOpcode[offOpcode + 3]);
1739 pIemCpu->offOpcode = offOpcode + 4;
1740 return VINF_SUCCESS;
1741}
1742
1743
1744/**
1745 * Fetches the next opcode dword and zero extends it to a quad word, returns
1746 * automatically on failure.
1747 *
1748 * @param a_pu64 Where to return the opcode quad word.
1749 * @remark Implicitly references pIemCpu.
1750 */
1751#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1752 do \
1753 { \
1754 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1755 if (rcStrict2 != VINF_SUCCESS) \
1756 return rcStrict2; \
1757 } while (0)
1758
1759
1760/**
1761 * Fetches the next signed double word from the opcode stream.
1762 *
1763 * @returns Strict VBox status code.
1764 * @param pIemCpu The IEM state.
1765 * @param pi32 Where to return the signed double word.
1766 */
1767DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1768{
1769 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1770}
1771
1772/**
1773 * Fetches the next signed double word from the opcode stream, returning
1774 * automatically on failure.
1775 *
1776 * @param a_pi32 Where to return the signed double word.
1777 * @remark Implicitly references pIemCpu.
1778 */
1779#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1780 do \
1781 { \
1782 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1783 if (rcStrict2 != VINF_SUCCESS) \
1784 return rcStrict2; \
1785 } while (0)
1786
1787
1788/**
1789 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1790 *
1791 * @returns Strict VBox status code.
1792 * @param pIemCpu The IEM state.
1793 * @param pu64 Where to return the opcode qword.
1794 */
1795DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1796{
1797 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1798 if (rcStrict == VINF_SUCCESS)
1799 {
1800 uint8_t offOpcode = pIemCpu->offOpcode;
1801 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1802 pIemCpu->abOpcode[offOpcode + 1],
1803 pIemCpu->abOpcode[offOpcode + 2],
1804 pIemCpu->abOpcode[offOpcode + 3]);
1805 pIemCpu->offOpcode = offOpcode + 4;
1806 }
1807 else
1808 *pu64 = 0;
1809 return rcStrict;
1810}
1811
1812
1813/**
1814 * Fetches the next opcode dword, sign extending it into a quad word.
1815 *
1816 * @returns Strict VBox status code.
1817 * @param pIemCpu The IEM state.
1818 * @param pu64 Where to return the opcode quad word.
1819 */
1820DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1821{
1822 uint8_t const offOpcode = pIemCpu->offOpcode;
1823 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1824 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1825
1826 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1827 pIemCpu->abOpcode[offOpcode + 1],
1828 pIemCpu->abOpcode[offOpcode + 2],
1829 pIemCpu->abOpcode[offOpcode + 3]);
1830 *pu64 = i32;
1831 pIemCpu->offOpcode = offOpcode + 4;
1832 return VINF_SUCCESS;
1833}
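/*
 * Worked example (illustrative only): given the opcode bytes f0 ff ff ff, the helper
 * above assembles i32 = (int32_t)0xfffffff0 = -16, and the int32_t -> uint64_t
 * assignment sign extends it so that *pu64 becomes 0xfffffffffffffff0.  The bytes
 * 10 00 00 00 yield i32 = 0x00000010 and *pu64 = 0x0000000000000010.
 */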
1834
1835
1836/**
1837 * Fetches the next opcode double word and sign extends it to a quad word,
1838 * returns automatically on failure.
1839 *
1840 * @param a_pu64 Where to return the opcode quad word.
1841 * @remark Implicitly references pIemCpu.
1842 */
1843#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1844 do \
1845 { \
1846 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1847 if (rcStrict2 != VINF_SUCCESS) \
1848 return rcStrict2; \
1849 } while (0)
1850
1851
1852/**
1853 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1854 *
1855 * @returns Strict VBox status code.
1856 * @param pIemCpu The IEM state.
1857 * @param pu64 Where to return the opcode qword.
1858 */
1859DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1860{
1861 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1862 if (rcStrict == VINF_SUCCESS)
1863 {
1864 uint8_t offOpcode = pIemCpu->offOpcode;
1865 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1866 pIemCpu->abOpcode[offOpcode + 1],
1867 pIemCpu->abOpcode[offOpcode + 2],
1868 pIemCpu->abOpcode[offOpcode + 3],
1869 pIemCpu->abOpcode[offOpcode + 4],
1870 pIemCpu->abOpcode[offOpcode + 5],
1871 pIemCpu->abOpcode[offOpcode + 6],
1872 pIemCpu->abOpcode[offOpcode + 7]);
1873 pIemCpu->offOpcode = offOpcode + 8;
1874 }
1875 else
1876 *pu64 = 0;
1877 return rcStrict;
1878}
1879
1880
1881/**
1882 * Fetches the next opcode qword.
1883 *
1884 * @returns Strict VBox status code.
1885 * @param pIemCpu The IEM state.
1886 * @param pu64 Where to return the opcode qword.
1887 */
1888DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1889{
1890 uint8_t const offOpcode = pIemCpu->offOpcode;
1891 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1892 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1893
1894 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1895 pIemCpu->abOpcode[offOpcode + 1],
1896 pIemCpu->abOpcode[offOpcode + 2],
1897 pIemCpu->abOpcode[offOpcode + 3],
1898 pIemCpu->abOpcode[offOpcode + 4],
1899 pIemCpu->abOpcode[offOpcode + 5],
1900 pIemCpu->abOpcode[offOpcode + 6],
1901 pIemCpu->abOpcode[offOpcode + 7]);
1902 pIemCpu->offOpcode = offOpcode + 8;
1903 return VINF_SUCCESS;
1904}
1905
1906
1907/**
1908 * Fetches the next opcode quad word, returns automatically on failure.
1909 *
1910 * @param a_pu64 Where to return the opcode quad word.
1911 * @remark Implicitly references pIemCpu.
1912 */
1913#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1914 do \
1915 { \
1916 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1917 if (rcStrict2 != VINF_SUCCESS) \
1918 return rcStrict2; \
1919 } while (0)
1920
1921
1922/** @name Misc Worker Functions.
1923 * @{
1924 */
1925
1926
1927/**
1928 * Validates a new SS segment.
1929 *
1930 * @returns VBox strict status code.
1931 * @param pIemCpu The IEM per CPU instance data.
1932 * @param pCtx The CPU context.
1933 * @param NewSS The new SS selector.
1934 * @param uCpl The CPL to load the stack for.
1935 * @param pDesc Where to return the descriptor.
1936 */
1937static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1938{
1939 NOREF(pCtx);
1940
1941 /* Null selectors are not allowed (we're not called for dispatching
1942 interrupts with SS=0 in long mode). */
1943 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1944 {
1945 Log(("iemMiscValidateNewSSandRsp: #x - null selector -> #TS(0)\n", NewSS));
1946 return iemRaiseTaskSwitchFault0(pIemCpu);
1947 }
1948
1949 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1950 if ((NewSS & X86_SEL_RPL) != uCpl)
1951 {
1952 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1953 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1954 }
1955
1956 /*
1957 * Read the descriptor.
1958 */
1959 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1960 if (rcStrict != VINF_SUCCESS)
1961 return rcStrict;
1962
1963 /*
1964 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1965 */
1966 if (!pDesc->Legacy.Gen.u1DescType)
1967 {
1968 Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1969 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1970 }
1971
1972 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1973 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1974 {
1975 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1976 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1977 }
1978 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1979 {
1980 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1981 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1982 }
1983
1984 /* Is it there? */
1985 /** @todo testcase: Is this checked before the canonical / limit check below? */
1986 if (!pDesc->Legacy.Gen.u1Present)
1987 {
1988 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1989 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1990 }
1991
1992 return VINF_SUCCESS;
1993}
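/*
 * Illustrative note: the checks above mirror the LSS/POP SS/MOV SS rules - non-null
 * selector, RPL == CPL, writable data segment, DPL == CPL, segment present.  For
 * example, in ring 3 a selector with RPL=0 already fails the RPL check and raises
 * #TS before the descriptor is even fetched.
 */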
1994
1995
1996/**
1997 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1998 * not.
1999 *
2000 * @param a_pIemCpu The IEM per CPU data.
2001 * @param a_pCtx The CPU context.
2002 */
2003#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2004# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2005 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2006 ? (a_pCtx)->eflags.u \
2007 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2008#else
2009# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2010 ( (a_pCtx)->eflags.u )
2011#endif
2012
2013/**
2014 * Updates the EFLAGS in the correct manner wrt. PATM.
2015 *
2016 * @param a_pIemCpu The IEM per CPU data.
2017 * @param a_pCtx The CPU context.
2018 */
2019#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2020# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2021 do { \
2022 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2023 (a_pCtx)->eflags.u = (a_fEfl); \
2024 else \
2025 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2026 } while (0)
2027#else
2028# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2029 do { \
2030 (a_pCtx)->eflags.u = (a_fEfl); \
2031 } while (0)
2032#endif
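/*
 * Illustrative sketch (not part of the upstream sources): a read-modify-write of the
 * guest EFLAGS should go through both macros so that raw-mode/PATM managed bits are
 * fetched and stored correctly.  Assumes pIemCpu and pCtx are in scope as elsewhere
 * in this file.
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;                     // e.g. clearing IF as an exception entry does
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */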
2033
2034
2035/** @} */
2036
2037/** @name Raising Exceptions.
2038 *
2039 * @{
2040 */
2041
2042/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2043 * @{ */
2044/** CPU exception. */
2045#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2046/** External interrupt (from PIC, APIC, whatever). */
2047#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2048/** Software interrupt (int or into, not bound).
2049 * Returns to the following instruction */
2050#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2051/** Takes an error code. */
2052#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2053/** Takes a CR2. */
2054#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2055/** Generated by the breakpoint instruction. */
2056#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2057/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2058#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2059/** @} */
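/*
 * Illustrative examples (approximate, for orientation only) of how these flags combine
 * for typical raisers:
 *   - #PF (page fault):   IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 *   - #GP (protection):   IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR
 *   - INT n instruction:  IEM_XCPT_FLAGS_T_SOFT_INT
 */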
2060
2061
2062/**
2063 * Loads the specified stack far pointer from the TSS.
2064 *
2065 * @returns VBox strict status code.
2066 * @param pIemCpu The IEM per CPU instance data.
2067 * @param pCtx The CPU context.
2068 * @param uCpl The CPL to load the stack for.
2069 * @param pSelSS Where to return the new stack segment.
2070 * @param puEsp Where to return the new stack pointer.
2071 */
2072static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2073 PRTSEL pSelSS, uint32_t *puEsp)
2074{
2075 VBOXSTRICTRC rcStrict;
2076 Assert(uCpl < 4);
2077 *puEsp = 0; /* make gcc happy */
2078 *pSelSS = 0; /* make gcc happy */
2079
2080 switch (pCtx->tr.Attr.n.u4Type)
2081 {
2082 /*
2083 * 16-bit TSS (X86TSS16).
2084 */
2085 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2086 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2087 {
2088 uint32_t off = uCpl * 4 + 2;
2089 if (off + 4 > pCtx->tr.u32Limit)
2090 {
2091 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2092 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2093 }
2094
2095 uint32_t u32Tmp = 0; /* gcc maybe... */
2096 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2097 if (rcStrict == VINF_SUCCESS)
2098 {
2099 *puEsp = RT_LOWORD(u32Tmp);
2100 *pSelSS = RT_HIWORD(u32Tmp);
2101 return VINF_SUCCESS;
2102 }
2103 break;
2104 }
2105
2106 /*
2107 * 32-bit TSS (X86TSS32).
2108 */
2109 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2110 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2111 {
2112 uint32_t off = uCpl * 8 + 4;
2113 if (off + 7 > pCtx->tr.u32Limit)
2114 {
2115 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2116 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2117 }
2118
2119 uint64_t u64Tmp;
2120 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2121 if (rcStrict == VINF_SUCCESS)
2122 {
2123 *puEsp = u64Tmp & UINT32_MAX;
2124 *pSelSS = (RTSEL)(u64Tmp >> 32);
2125 return VINF_SUCCESS;
2126 }
2127 break;
2128 }
2129
2130 default:
2131 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2132 }
2133 return rcStrict;
2134}
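/*
 * Worked example (illustrative): in a 16-bit TSS the ring stacks live at offsets 2
 * (sp0:ss0), 6 (sp1:ss1) and 10 (sp2:ss2), which is where "uCpl * 4 + 2" comes from;
 * in a 32-bit TSS the esp0/ss0 pair starts at offset 4 with 8 bytes per ring, hence
 * "uCpl * 8 + 4".  So for uCpl=1 we read 4 bytes at TSS+6 (16-bit) or 8 bytes at
 * TSS+12 (32-bit).
 */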
2135
2136
2137/**
2138 * Loads the specified stack pointer from the 64-bit TSS.
2139 *
2140 * @returns VBox strict status code.
2141 * @param pIemCpu The IEM per CPU instance data.
2142 * @param pCtx The CPU context.
2143 * @param uCpl The CPL to load the stack for.
2144 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2145 * @param puRsp Where to return the new stack pointer.
2146 */
2147static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
2148 uint64_t *puRsp)
2149{
2150 Assert(uCpl < 4);
2151 Assert(uIst < 8);
2152 *puRsp = 0; /* make gcc happy */
2153
2154 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2155
2156 uint32_t off;
2157 if (uIst)
2158 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2159 else
2160 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2161 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2162 {
2163 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2164 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2165 }
2166
2167 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2168}
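/*
 * Worked example (illustrative): in the 64-bit TSS rsp0 is at offset 4 and ist1 at
 * offset 36, so uCpl=2 with uIst=0 reads the qword at TSS+20, while uIst=3 reads
 * TSS+52 regardless of the CPL.
 */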
2169
2170
2171/**
2172 * Adjusts the CPU state according to the exception being raised.
2173 *
2174 * @param pCtx The CPU context.
2175 * @param u8Vector The exception that has been raised.
2176 */
2177DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2178{
2179 switch (u8Vector)
2180 {
2181 case X86_XCPT_DB:
2182 pCtx->dr[7] &= ~X86_DR7_GD;
2183 break;
2184 /** @todo Read the AMD and Intel exception reference... */
2185 }
2186}
2187
2188
2189/**
2190 * Implements exceptions and interrupts for real mode.
2191 *
2192 * @returns VBox strict status code.
2193 * @param pIemCpu The IEM per CPU instance data.
2194 * @param pCtx The CPU context.
2195 * @param cbInstr The number of bytes to offset rIP by in the return
2196 * address.
2197 * @param u8Vector The interrupt / exception vector number.
2198 * @param fFlags The flags.
2199 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2200 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2201 */
2202static VBOXSTRICTRC
2203iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2204 PCPUMCTX pCtx,
2205 uint8_t cbInstr,
2206 uint8_t u8Vector,
2207 uint32_t fFlags,
2208 uint16_t uErr,
2209 uint64_t uCr2)
2210{
2211 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2212 NOREF(uErr); NOREF(uCr2);
2213
2214 /*
2215 * Read the IDT entry.
2216 */
2217 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2218 {
2219 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2220 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2221 }
2222 RTFAR16 Idte;
2223 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2224 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2225 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2226 return rcStrict;
2227
2228 /*
2229 * Push the stack frame.
2230 */
2231 uint16_t *pu16Frame;
2232 uint64_t uNewRsp;
2233 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2234 if (rcStrict != VINF_SUCCESS)
2235 return rcStrict;
2236
2237 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2238 pu16Frame[2] = (uint16_t)fEfl;
2239 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2240 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2241 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2242 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2243 return rcStrict;
2244
2245 /*
2246 * Load the vector address into cs:ip and make exception specific state
2247 * adjustments.
2248 */
2249 pCtx->cs.Sel = Idte.sel;
2250 pCtx->cs.ValidSel = Idte.sel;
2251 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2252 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2253 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2254 pCtx->rip = Idte.off;
2255 fEfl &= ~X86_EFL_IF;
2256 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2257
2258 /** @todo do we actually do this in real mode? */
2259 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2260 iemRaiseXcptAdjustState(pCtx, u8Vector);
2261
2262 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2263}
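/*
 * Worked example (illustrative): for vector 0x21 the real-mode IVT entry is the
 * offset:segment pair at linear address 0x21 * 4 = 0x84, and the 6-byte frame pushed
 * above lands on the stack as IP (lowest address), CS, then FLAGS (highest), matching
 * pu16Frame[0..2].
 */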
2264
2265
2266/**
2267 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2268 *
2269 * @param pIemCpu The IEM per CPU instance data.
2270 * @param pSReg Pointer to the segment register.
2271 */
2272static void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2273{
2274 pSReg->Sel = 0;
2275 pSReg->ValidSel = 0;
2276 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2277 {
2278 /* VT-x (Intel 3960x) doesn't change the base and limit; it keeps only the attributes below and marks the selector unusable. */
2279 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2280 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2281 }
2282 else
2283 {
2284 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2285 /** @todo check this on AMD-V */
2286 pSReg->u64Base = 0;
2287 pSReg->u32Limit = 0;
2288 }
2289}
2290
2291
2292/**
2293 * Loads a segment selector during a task switch in V8086 mode.
2294 *
2295 * @param pIemCpu The IEM per CPU instance data.
2296 * @param pSReg Pointer to the segment register.
2297 * @param uSel The selector value to load.
2298 */
2299static void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2300{
2301 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2302 pSReg->Sel = uSel;
2303 pSReg->ValidSel = uSel;
2304 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2305 pSReg->u64Base = uSel << 4;
2306 pSReg->u32Limit = 0xffff;
2307 pSReg->Attr.u = 0xf3;
2308}
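/*
 * Worked example (illustrative): loading uSel=0x1234 here yields base 0x12340
 * (selector << 4), limit 0xffff and attributes 0xf3, i.e. a present, DPL=3, accessed
 * read/write data segment as required for V8086 segment registers.
 */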
2309
2310
2311/**
2312 * Loads a NULL data selector into a selector register, both the hidden and
2313 * visible parts, in protected mode.
2314 *
2315 * @param pIemCpu The IEM state of the calling EMT.
2316 * @param pSReg Pointer to the segment register.
2317 * @param uRpl The RPL.
2318 */
2319static void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2320{
2321 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2322 * data selector in protected mode. */
2323 pSReg->Sel = uRpl;
2324 pSReg->ValidSel = uRpl;
2325 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2326 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2327 {
2328 /* VT-x (Intel 3960x) observed doing something like this. */
2329 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2330 pSReg->u32Limit = UINT32_MAX;
2331 pSReg->u64Base = 0;
2332 }
2333 else
2334 {
2335 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2336 pSReg->u32Limit = 0;
2337 pSReg->u64Base = 0;
2338 }
2339}
2340
2341
2342/**
2343 * Loads a segment selector during a task switch in protected mode. In this
2344 * task switch scenario we raise #TS exceptions rather than #GPs.
2345 *
2346 * @returns VBox strict status code.
2347 * @param pIemCpu The IEM per CPU instance data.
2348 * @param pSReg Pointer to the segment register.
2349 * @param uSel The new selector value.
2350 *
2351 * @remarks This does -NOT- handle CS or SS.
2352 * @remarks This expects pIemCpu->uCpl to be up to date.
2353 */
2354static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2355{
2356 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2357
2358 /* Null data selector. */
2359 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2360 {
2361 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2363 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2364 return VINF_SUCCESS;
2365 }
2366
2367 /* Fetch the descriptor. */
2368 IEMSELDESC Desc;
2369 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2370 if (rcStrict != VINF_SUCCESS)
2371 {
2372 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2373 VBOXSTRICTRC_VAL(rcStrict)));
2374 return rcStrict;
2375 }
2376
2377 /* Must be a data segment or readable code segment. */
2378 if ( !Desc.Legacy.Gen.u1DescType
2379 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2380 {
2381 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2382 Desc.Legacy.Gen.u4Type));
2383 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2384 }
2385
2386 /* Check privileges for data segments and non-conforming code segments. */
2387 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2388 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2389 {
2390 /* The RPL and the new CPL must be less than or equal to the DPL. */
2391 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2392 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2393 {
2394 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2395 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2396 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2397 }
2398 }
2399
2400 /* Is it there? */
2401 if (!Desc.Legacy.Gen.u1Present)
2402 {
2403 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2404 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2405 }
2406
2407 /* The base and limit. */
2408 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2409 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2410
2411 /*
2412 * Ok, everything checked out fine. Now set the accessed bit before
2413 * committing the result into the registers.
2414 */
2415 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2416 {
2417 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2418 if (rcStrict != VINF_SUCCESS)
2419 return rcStrict;
2420 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2421 }
2422
2423 /* Commit */
2424 pSReg->Sel = uSel;
2425 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2426 pSReg->u32Limit = cbLimit;
2427 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2428 pSReg->ValidSel = uSel;
2429 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2430 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2431 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2432
2433 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2434 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2435 return VINF_SUCCESS;
2436}
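/*
 * Worked example (illustrative): for data segments and non-conforming code segments
 * the privilege test above requires both RPL <= DPL and CPL <= DPL.  E.g. a selector
 * with RPL=3 and DPL=2 raises #TS even when the CPL is 0, whereas a conforming code
 * segment skips the privilege check entirely.
 */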
2437
2438
2439/**
2440 * Performs a task switch.
2441 *
2442 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2443 * caller is responsible for performing the necessary checks (like DPL, TSS
2444 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2445 * reference for JMP, CALL, IRET.
2446 *
2447 * If the task switch is due to a software interrupt or hardware exception,
2448 * the caller is responsible for validating the TSS selector and descriptor. See
2449 * Intel Instruction reference for INT n.
2450 *
2451 * @returns VBox strict status code.
2452 * @param pIemCpu The IEM per CPU instance data.
2453 * @param pCtx The CPU context.
2454 * @param enmTaskSwitch What caused this task switch.
2455 * @param uNextEip The EIP effective after the task switch.
2456 * @param fFlags The flags.
2457 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2458 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2459 * @param SelTSS The TSS selector of the new task.
2460 * @param pNewDescTSS Pointer to the new TSS descriptor.
2461 */
2462static VBOXSTRICTRC iemTaskSwitch(PIEMCPU pIemCpu,
2463 PCPUMCTX pCtx,
2464 IEMTASKSWITCH enmTaskSwitch,
2465 uint32_t uNextEip,
2466 uint32_t fFlags,
2467 uint16_t uErr,
2468 uint64_t uCr2,
2469 RTSEL SelTSS,
2470 PIEMSELDESC pNewDescTSS)
2471{
2472 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2473 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2474
2475 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2476 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2477 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2478 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2479 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2480
2481 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2482 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2483
2484 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2485 fIsNewTSS386, pCtx->eip, uNextEip));
2486
2487 /* Update CR2 in case it's a page-fault. */
2488 /** @todo This should probably be done much earlier in IEM/PGM. See
2489 * @bugref{5653} comment #49. */
2490 if (fFlags & IEM_XCPT_FLAGS_CR2)
2491 pCtx->cr2 = uCr2;
2492
2493 /*
2494 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2495 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2496 */
2497 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2498 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2499 if (uNewTSSLimit < uNewTSSLimitMin)
2500 {
2501 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2502 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2503 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2504 }
2505
2506 /*
2507 * Check the current TSS limit. The last written byte to the current TSS during the
2508 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2509 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2510 *
2511 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2512 * end up with smaller than "legal" TSS limits.
2513 */
2514 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2515 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2516 if (uCurTSSLimit < uCurTSSLimitMin)
2517 {
2518 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2519 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2520 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2521 }
2522
2523 /*
2524 * Verify that the new TSS can be accessed and map it. Map only the required contents
2525 * and not the entire TSS.
2526 */
2527 void *pvNewTSS;
2528 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2529 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2530 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2531 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2532 * not perform correct translation if this happens. See Intel spec. 7.2.1
2533 * "Task-State Segment" */
2534 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2535 if (rcStrict != VINF_SUCCESS)
2536 {
2537 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2538 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2539 return rcStrict;
2540 }
2541
2542 /*
2543 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2544 */
2545 uint32_t u32EFlags = pCtx->eflags.u32;
2546 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2547 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2548 {
2549 PX86DESC pDescCurTSS;
2550 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2551 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2552 if (rcStrict != VINF_SUCCESS)
2553 {
2554 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2555 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2556 return rcStrict;
2557 }
2558
2559 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2560 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2561 if (rcStrict != VINF_SUCCESS)
2562 {
2563 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2564 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2565 return rcStrict;
2566 }
2567
2568 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2569 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2570 {
2571 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2572 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2573 u32EFlags &= ~X86_EFL_NT;
2574 }
2575 }
2576
2577 /*
2578 * Save the CPU state into the current TSS.
2579 */
2580 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2581 if (GCPtrNewTSS == GCPtrCurTSS)
2582 {
2583 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2584 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2585 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2586 }
2587 if (fIsNewTSS386)
2588 {
2589 /*
2590 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2591 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2592 */
2593 void *pvCurTSS32;
2594 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2595 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2596 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2597 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2598 if (rcStrict != VINF_SUCCESS)
2599 {
2600 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2601 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2602 return rcStrict;
2603 }
2604
2605 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2606 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2607 pCurTSS32->eip = uNextEip;
2608 pCurTSS32->eflags = u32EFlags;
2609 pCurTSS32->eax = pCtx->eax;
2610 pCurTSS32->ecx = pCtx->ecx;
2611 pCurTSS32->edx = pCtx->edx;
2612 pCurTSS32->ebx = pCtx->ebx;
2613 pCurTSS32->esp = pCtx->esp;
2614 pCurTSS32->ebp = pCtx->ebp;
2615 pCurTSS32->esi = pCtx->esi;
2616 pCurTSS32->edi = pCtx->edi;
2617 pCurTSS32->es = pCtx->es.Sel;
2618 pCurTSS32->cs = pCtx->cs.Sel;
2619 pCurTSS32->ss = pCtx->ss.Sel;
2620 pCurTSS32->ds = pCtx->ds.Sel;
2621 pCurTSS32->fs = pCtx->fs.Sel;
2622 pCurTSS32->gs = pCtx->gs.Sel;
2623
2624 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2625 if (rcStrict != VINF_SUCCESS)
2626 {
2627 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2628 VBOXSTRICTRC_VAL(rcStrict)));
2629 return rcStrict;
2630 }
2631 }
2632 else
2633 {
2634 /*
2635 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2636 */
2637 void *pvCurTSS16;
2638 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2639 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2640 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2641 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2642 if (rcStrict != VINF_SUCCESS)
2643 {
2644 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2645 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2646 return rcStrict;
2647 }
2648
2649 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2650 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2651 pCurTSS16->ip = uNextEip;
2652 pCurTSS16->flags = u32EFlags;
2653 pCurTSS16->ax = pCtx->ax;
2654 pCurTSS16->cx = pCtx->cx;
2655 pCurTSS16->dx = pCtx->dx;
2656 pCurTSS16->bx = pCtx->bx;
2657 pCurTSS16->sp = pCtx->sp;
2658 pCurTSS16->bp = pCtx->bp;
2659 pCurTSS16->si = pCtx->si;
2660 pCurTSS16->di = pCtx->di;
2661 pCurTSS16->es = pCtx->es.Sel;
2662 pCurTSS16->cs = pCtx->cs.Sel;
2663 pCurTSS16->ss = pCtx->ss.Sel;
2664 pCurTSS16->ds = pCtx->ds.Sel;
2665
2666 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2667 if (rcStrict != VINF_SUCCESS)
2668 {
2669 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2670 VBOXSTRICTRC_VAL(rcStrict)));
2671 return rcStrict;
2672 }
2673 }
2674
2675 /*
2676 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2677 */
2678 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2679 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2680 {
2681 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2682 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2683 pNewTSS->selPrev = pCtx->tr.Sel;
2684 }
2685
2686 /*
2687 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2688 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2689 */
2690 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2691 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2692 bool fNewDebugTrap;
2693 if (fIsNewTSS386)
2694 {
2695 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2696 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2697 uNewEip = pNewTSS32->eip;
2698 uNewEflags = pNewTSS32->eflags;
2699 uNewEax = pNewTSS32->eax;
2700 uNewEcx = pNewTSS32->ecx;
2701 uNewEdx = pNewTSS32->edx;
2702 uNewEbx = pNewTSS32->ebx;
2703 uNewEsp = pNewTSS32->esp;
2704 uNewEbp = pNewTSS32->ebp;
2705 uNewEsi = pNewTSS32->esi;
2706 uNewEdi = pNewTSS32->edi;
2707 uNewES = pNewTSS32->es;
2708 uNewCS = pNewTSS32->cs;
2709 uNewSS = pNewTSS32->ss;
2710 uNewDS = pNewTSS32->ds;
2711 uNewFS = pNewTSS32->fs;
2712 uNewGS = pNewTSS32->gs;
2713 uNewLdt = pNewTSS32->selLdt;
2714 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2715 }
2716 else
2717 {
2718 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2719 uNewCr3 = 0;
2720 uNewEip = pNewTSS16->ip;
2721 uNewEflags = pNewTSS16->flags;
2722 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2723 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2724 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2725 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2726 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2727 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2728 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2729 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2730 uNewES = pNewTSS16->es;
2731 uNewCS = pNewTSS16->cs;
2732 uNewSS = pNewTSS16->ss;
2733 uNewDS = pNewTSS16->ds;
2734 uNewFS = 0;
2735 uNewGS = 0;
2736 uNewLdt = pNewTSS16->selLdt;
2737 fNewDebugTrap = false;
2738 }
2739
2740 if (GCPtrNewTSS == GCPtrCurTSS)
2741 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2742 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2743
2744 /*
2745 * We're done accessing the new TSS.
2746 */
2747 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2748 if (rcStrict != VINF_SUCCESS)
2749 {
2750 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2751 return rcStrict;
2752 }
2753
2754 /*
2755 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2756 */
2757 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2758 {
2759 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2760 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2761 if (rcStrict != VINF_SUCCESS)
2762 {
2763 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2764 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2765 return rcStrict;
2766 }
2767
2768 /* Check that the descriptor indicates the new TSS is available (not busy). */
2769 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2770 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2771 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2772
2773 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2774 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2775 if (rcStrict != VINF_SUCCESS)
2776 {
2777 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2778 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2779 return rcStrict;
2780 }
2781 }
2782
2783 /*
2784 * From this point on, we're technically in the new task. Exceptions raised during
2785 * the remainder of the switch are handled in the new task's context, before any of its instructions execute.
2786 */
2787 pCtx->tr.Sel = SelTSS;
2788 pCtx->tr.ValidSel = SelTSS;
2789 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2790 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2791 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2792 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2793 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2794
2795 /* Set the busy bit in TR. */
2796 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2797 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2798 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2799 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2800 {
2801 uNewEflags |= X86_EFL_NT;
2802 }
2803
2804 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2805 pCtx->cr0 |= X86_CR0_TS;
2806 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2807
2808 pCtx->eip = uNewEip;
2809 pCtx->eax = uNewEax;
2810 pCtx->ecx = uNewEcx;
2811 pCtx->edx = uNewEdx;
2812 pCtx->ebx = uNewEbx;
2813 pCtx->esp = uNewEsp;
2814 pCtx->ebp = uNewEbp;
2815 pCtx->esi = uNewEsi;
2816 pCtx->edi = uNewEdi;
2817
2818 uNewEflags &= X86_EFL_LIVE_MASK;
2819 uNewEflags |= X86_EFL_RA1_MASK;
2820 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2821
2822 /*
2823 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2824 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2825 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2826 */
2827 pCtx->es.Sel = uNewES;
2828 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2829 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2830
2831 pCtx->cs.Sel = uNewCS;
2832 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2833 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2834
2835 pCtx->ss.Sel = uNewSS;
2836 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2837 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2838
2839 pCtx->ds.Sel = uNewDS;
2840 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2841 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2842
2843 pCtx->fs.Sel = uNewFS;
2844 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2845 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2846
2847 pCtx->gs.Sel = uNewGS;
2848 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2849 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2850 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2851
2852 pCtx->ldtr.Sel = uNewLdt;
2853 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2854 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2855 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2856
2857 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2858 {
2859 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2860 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2861 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2862 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2863 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2864 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2865 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2866 }
2867
2868 /*
2869 * Switch CR3 for the new task.
2870 */
2871 if ( fIsNewTSS386
2872 && (pCtx->cr0 & X86_CR0_PG))
2873 {
2874 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2875 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2876 {
2877 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2878 AssertRCSuccessReturn(rc, rc);
2879 }
2880 else
2881 pCtx->cr3 = uNewCr3;
2882
2883 /* Inform PGM. */
2884 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2885 {
2886 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2887 AssertRCReturn(rc, rc);
2888 /* ignore informational status codes */
2889 }
2890 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2891 }
2892
2893 /*
2894 * Switch LDTR for the new task.
2895 */
2896 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2897 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2898 else
2899 {
2900 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2901
2902 IEMSELDESC DescNewLdt;
2903 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2904 if (rcStrict != VINF_SUCCESS)
2905 {
2906 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2907 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2908 return rcStrict;
2909 }
2910 if ( !DescNewLdt.Legacy.Gen.u1Present
2911 || DescNewLdt.Legacy.Gen.u1DescType
2912 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2913 {
2914 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2915 uNewLdt, DescNewLdt.Legacy.u));
2916 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2917 }
2918
2919 pCtx->ldtr.ValidSel = uNewLdt;
2920 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2921 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2922 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2923 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2924 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2925 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2926 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2927 }
2928
2929 IEMSELDESC DescSS;
2930 if (IEM_IS_V86_MODE(pIemCpu))
2931 {
2932 pIemCpu->uCpl = 3;
2933 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2934 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2935 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2936 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2937 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2938 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2939 }
2940 else
2941 {
2942 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2943
2944 /*
2945 * Load the stack segment for the new task.
2946 */
2947 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2948 {
2949 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2950 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2951 }
2952
2953 /* Fetch the descriptor. */
2954 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2955 if (rcStrict != VINF_SUCCESS)
2956 {
2957 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2958 VBOXSTRICTRC_VAL(rcStrict)));
2959 return rcStrict;
2960 }
2961
2962 /* SS must be a data segment and writable. */
2963 if ( !DescSS.Legacy.Gen.u1DescType
2964 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2965 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2966 {
2967 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2968 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2969 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2970 }
2971
2972 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2973 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2974 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2975 {
2976 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2977 uNewCpl));
2978 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2979 }
2980
2981 /* Is it there? */
2982 if (!DescSS.Legacy.Gen.u1Present)
2983 {
2984 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2985 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2986 }
2987
2988 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2989 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2990
2991 /* Set the accessed bit before committing the result into SS. */
2992 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2993 {
2994 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2995 if (rcStrict != VINF_SUCCESS)
2996 return rcStrict;
2997 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2998 }
2999
3000 /* Commit SS. */
3001 pCtx->ss.Sel = uNewSS;
3002 pCtx->ss.ValidSel = uNewSS;
3003 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3004 pCtx->ss.u32Limit = cbLimit;
3005 pCtx->ss.u64Base = u64Base;
3006 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3007 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3008
3009 /* CPL has changed, update IEM before loading rest of segments. */
3010 pIemCpu->uCpl = uNewCpl;
3011
3012 /*
3013 * Load the data segments for the new task.
3014 */
3015 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3016 if (rcStrict != VINF_SUCCESS)
3017 return rcStrict;
3018 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3019 if (rcStrict != VINF_SUCCESS)
3020 return rcStrict;
3021 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3022 if (rcStrict != VINF_SUCCESS)
3023 return rcStrict;
3024 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3025 if (rcStrict != VINF_SUCCESS)
3026 return rcStrict;
3027
3028 /*
3029 * Load the code segment for the new task.
3030 */
3031 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3032 {
3033 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3034 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3035 }
3036
3037 /* Fetch the descriptor. */
3038 IEMSELDESC DescCS;
3039 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3040 if (rcStrict != VINF_SUCCESS)
3041 {
3042 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3043 return rcStrict;
3044 }
3045
3046 /* CS must be a code segment. */
3047 if ( !DescCS.Legacy.Gen.u1DescType
3048 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3049 {
3050 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3051 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3052 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3053 }
3054
3055 /* For conforming CS, DPL must be less than or equal to the RPL. */
3056 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3057 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3058 {
3059 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3060 DescCS.Legacy.Gen.u2Dpl));
3061 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3062 }
3063
3064 /* For non-conforming CS, DPL must match RPL. */
3065 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3066 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3067 {
3068 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3069 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3070 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3071 }
3072
3073 /* Is it there? */
3074 if (!DescCS.Legacy.Gen.u1Present)
3075 {
3076 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3077 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3078 }
3079
3080 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3081 u64Base = X86DESC_BASE(&DescCS.Legacy);
3082
3083 /* Set the accessed bit before committing the result into CS. */
3084 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3085 {
3086 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3087 if (rcStrict != VINF_SUCCESS)
3088 return rcStrict;
3089 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3090 }
3091
3092 /* Commit CS. */
3093 pCtx->cs.Sel = uNewCS;
3094 pCtx->cs.ValidSel = uNewCS;
3095 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3096 pCtx->cs.u32Limit = cbLimit;
3097 pCtx->cs.u64Base = u64Base;
3098 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3099 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3100 }
3101
3102 /** @todo Debug trap. */
3103 if (fIsNewTSS386 && fNewDebugTrap)
3104 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3105
3106 /*
3107 * Construct the error code masks based on what caused this task switch.
3108 * See Intel Instruction reference for INT.
3109 */
3110 uint16_t uExt;
3111 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3112 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3113 {
3114 uExt = 1;
3115 }
3116 else
3117 uExt = 0;
3118
3119 /*
3120 * Push any error code on to the new stack.
3121 */
3122 if (fFlags & IEM_XCPT_FLAGS_ERR)
3123 {
3124 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3125 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3126 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3127
3128 /* Check that there is sufficient space on the stack. */
3129 /** @todo Factor out segment limit checking for normal/expand down segments
3130 * into a separate function. */
3131 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3132 {
3133 if ( pCtx->esp - 1 > cbLimitSS
3134 || pCtx->esp < cbStackFrame)
3135 {
3136 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3137 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3138 cbStackFrame));
3139 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3140 }
3141 }
3142 else
3143 {
3144 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3145 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3146 {
3147 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3148 cbStackFrame));
3149 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3150 }
3151 }
3152
3153
3154 if (fIsNewTSS386)
3155 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3156 else
3157 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3158 if (rcStrict != VINF_SUCCESS)
3159 {
3160 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3161 VBOXSTRICTRC_VAL(rcStrict)));
3162 return rcStrict;
3163 }
3164 }
3165
3166 /* Check the new EIP against the new CS limit. */
3167 if (pCtx->eip > pCtx->cs.u32Limit)
3168 {
3169        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3170 pCtx->eip, pCtx->cs.u32Limit));
3171 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3172 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3173 }
3174
3175 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3176 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3177}
3178
3179
3180/**
3181 * Implements exceptions and interrupts for protected mode.
3182 *
3183 * @returns VBox strict status code.
3184 * @param pIemCpu The IEM per CPU instance data.
3185 * @param pCtx The CPU context.
3186 * @param cbInstr The number of bytes to offset rIP by in the return
3187 * address.
3188 * @param u8Vector The interrupt / exception vector number.
3189 * @param fFlags The flags.
3190 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3191 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3192 */
3193static VBOXSTRICTRC
3194iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3195 PCPUMCTX pCtx,
3196 uint8_t cbInstr,
3197 uint8_t u8Vector,
3198 uint32_t fFlags,
3199 uint16_t uErr,
3200 uint64_t uCr2)
3201{
3202 NOREF(cbInstr);
3203
3204 /*
3205 * Read the IDT entry.
3206 */
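    /* Protected-mode IDT entries are 8 bytes each; the IDT limit must cover the last
       byte of the entry. Faults related to the IDT use an error code of the form
       (vector << 3) with the IDT bit set. */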
3207 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3208 {
3209 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3210 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3211 }
3212 X86DESC Idte;
3213 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3214 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3215 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3216 return rcStrict;
3217 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3218 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3219 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3220
3221 /*
3222 * Check the descriptor type, DPL and such.
3223 * ASSUMES this is done in the same order as described for call-gate calls.
3224 */
3225 if (Idte.Gate.u1DescType)
3226 {
3227 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3228 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3229 }
3230 bool fTaskGate = false;
3231 uint8_t f32BitGate = true;
3232 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
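    /* Interrupt gates additionally clear IF on entry (added to fEflToClear in the
       cases below), trap gates leave IF alone; both clear TF, NT, RF and VM.
       Task gates dispatch the event via a task switch instead. */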
3233 switch (Idte.Gate.u4Type)
3234 {
3235 case X86_SEL_TYPE_SYS_UNDEFINED:
3236 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3237 case X86_SEL_TYPE_SYS_LDT:
3238 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3239 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3240 case X86_SEL_TYPE_SYS_UNDEFINED2:
3241 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3242 case X86_SEL_TYPE_SYS_UNDEFINED3:
3243 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3244 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3245 case X86_SEL_TYPE_SYS_UNDEFINED4:
3246 {
3247 /** @todo check what actually happens when the type is wrong...
3248 * esp. call gates. */
3249 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3250 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3251 }
3252
3253 case X86_SEL_TYPE_SYS_286_INT_GATE:
3254            f32BitGate = false; /* fall thru */
3255 case X86_SEL_TYPE_SYS_386_INT_GATE:
3256 fEflToClear |= X86_EFL_IF;
3257 break;
3258
3259 case X86_SEL_TYPE_SYS_TASK_GATE:
3260 fTaskGate = true;
3261#ifndef IEM_IMPLEMENTS_TASKSWITCH
3262 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3263#endif
3264 break;
3265
3266 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3267            f32BitGate = false; /* fall thru */
3268 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3269 break;
3270
3271 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3272 }
3273
3274 /* Check DPL against CPL if applicable. */
3275 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3276 {
3277 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3278 {
3279 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3280 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3281 }
3282 }
3283
3284 /* Is it there? */
3285 if (!Idte.Gate.u1Present)
3286 {
3287 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3288 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3289 }
3290
3291 /* Is it a task-gate? */
3292 if (fTaskGate)
3293 {
3294 /*
3295 * Construct the error code masks based on what caused this task switch.
3296 * See Intel Instruction reference for INT.
3297 */
3298 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3299 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3300 RTSEL SelTSS = Idte.Gate.u16Sel;
3301
3302 /*
3303 * Fetch the TSS descriptor in the GDT.
3304 */
3305 IEMSELDESC DescTSS;
3306 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3307 if (rcStrict != VINF_SUCCESS)
3308 {
3309 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3310 VBOXSTRICTRC_VAL(rcStrict)));
3311 return rcStrict;
3312 }
3313
3314 /* The TSS descriptor must be a system segment and be available (not busy). */
3315 if ( DescTSS.Legacy.Gen.u1DescType
3316 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3317 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3318 {
3319 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3320 u8Vector, SelTSS, DescTSS.Legacy.au64));
3321 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3322 }
3323
3324 /* The TSS must be present. */
3325 if (!DescTSS.Legacy.Gen.u1Present)
3326 {
3327 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3328 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3329 }
3330
3331 /* Do the actual task switch. */
3332 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3333 }
3334
3335 /* A null CS is bad. */
3336 RTSEL NewCS = Idte.Gate.u16Sel;
3337 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3338 {
3339 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3340 return iemRaiseGeneralProtectionFault0(pIemCpu);
3341 }
3342
3343 /* Fetch the descriptor for the new CS. */
3344 IEMSELDESC DescCS;
3345 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3346 if (rcStrict != VINF_SUCCESS)
3347 {
3348 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3349 return rcStrict;
3350 }
3351
3352 /* Must be a code segment. */
3353 if (!DescCS.Legacy.Gen.u1DescType)
3354 {
3355 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3356 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3357 }
3358 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3359 {
3360 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3361 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3362 }
3363
3364 /* Don't allow lowering the privilege level. */
3365 /** @todo Does the lowering of privileges apply to software interrupts
3366 * only? This has bearings on the more-privileged or
3367 * same-privilege stack behavior further down. A testcase would
3368 * be nice. */
3369 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3370 {
3371 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3372 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3373 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3374 }
3375
3376 /* Make sure the selector is present. */
3377 if (!DescCS.Legacy.Gen.u1Present)
3378 {
3379 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3380 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3381 }
3382
3383 /* Check the new EIP against the new CS limit. */
3384 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3385 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3386 ? Idte.Gate.u16OffsetLow
3387 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3388 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3389 if (uNewEip > cbLimitCS)
3390 {
3391 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3392 u8Vector, uNewEip, cbLimitCS, NewCS));
3393 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3394 }
3395
3396 /* Calc the flag image to push. */
3397 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3398 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3399 fEfl &= ~X86_EFL_RF;
3400 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3401 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3402
3403 /* From V8086 mode only go to CPL 0. */
3404 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3405 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3406 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3407 {
3408 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3409 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3410 }
3411
3412 /*
3413 * If the privilege level changes, we need to get a new stack from the TSS.
3414     * This in turn means validating the new SS and ESP...
3415 */
3416 if (uNewCpl != pIemCpu->uCpl)
3417 {
3418 RTSEL NewSS;
3419 uint32_t uNewEsp;
3420 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3421 if (rcStrict != VINF_SUCCESS)
3422 return rcStrict;
3423
3424 IEMSELDESC DescSS;
3425 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3426 if (rcStrict != VINF_SUCCESS)
3427 return rcStrict;
3428
3429 /* Check that there is sufficient space for the stack frame. */
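        /* Frame contents from the lowest address up: [error code,] EIP, CS, EFLAGS,
           old ESP, old SS, and when coming from V8086 mode also ES, DS, FS and GS.
           These are 16-bit entries for 286 gates, doubled by the f32BitGate shift
           for 32-bit gates. */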
3430 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3431 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3432 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3433 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3434
3435 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3436 {
3437 if ( uNewEsp - 1 > cbLimitSS
3438 || uNewEsp < cbStackFrame)
3439 {
3440 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3441 u8Vector, NewSS, uNewEsp, cbStackFrame));
3442 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3443 }
3444 }
3445 else
3446 {
3447 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3448 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3449 {
3450 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3451 u8Vector, NewSS, uNewEsp, cbStackFrame));
3452 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3453 }
3454 }
3455
3456 /*
3457 * Start making changes.
3458 */
3459
3460 /* Create the stack frame. */
3461 RTPTRUNION uStackFrame;
3462 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3463 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3464 if (rcStrict != VINF_SUCCESS)
3465 return rcStrict;
3466 void * const pvStackFrame = uStackFrame.pv;
3467 if (f32BitGate)
3468 {
3469 if (fFlags & IEM_XCPT_FLAGS_ERR)
3470 *uStackFrame.pu32++ = uErr;
3471 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3472 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3473 uStackFrame.pu32[2] = fEfl;
3474 uStackFrame.pu32[3] = pCtx->esp;
3475 uStackFrame.pu32[4] = pCtx->ss.Sel;
3476 if (fEfl & X86_EFL_VM)
3477 {
3478 uStackFrame.pu32[1] = pCtx->cs.Sel;
3479 uStackFrame.pu32[5] = pCtx->es.Sel;
3480 uStackFrame.pu32[6] = pCtx->ds.Sel;
3481 uStackFrame.pu32[7] = pCtx->fs.Sel;
3482 uStackFrame.pu32[8] = pCtx->gs.Sel;
3483 }
3484 }
3485 else
3486 {
3487 if (fFlags & IEM_XCPT_FLAGS_ERR)
3488 *uStackFrame.pu16++ = uErr;
3489 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3490 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3491 uStackFrame.pu16[2] = fEfl;
3492 uStackFrame.pu16[3] = pCtx->sp;
3493 uStackFrame.pu16[4] = pCtx->ss.Sel;
3494 if (fEfl & X86_EFL_VM)
3495 {
3496 uStackFrame.pu16[1] = pCtx->cs.Sel;
3497 uStackFrame.pu16[5] = pCtx->es.Sel;
3498 uStackFrame.pu16[6] = pCtx->ds.Sel;
3499 uStackFrame.pu16[7] = pCtx->fs.Sel;
3500 uStackFrame.pu16[8] = pCtx->gs.Sel;
3501 }
3502 }
3503 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3504 if (rcStrict != VINF_SUCCESS)
3505 return rcStrict;
3506
3507 /* Mark the selectors 'accessed' (hope this is the correct time). */
3508        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3509 * after pushing the stack frame? (Write protect the gdt + stack to
3510 * find out.) */
3511 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3512 {
3513 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3514 if (rcStrict != VINF_SUCCESS)
3515 return rcStrict;
3516 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3517 }
3518
3519 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3520 {
3521 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3522 if (rcStrict != VINF_SUCCESS)
3523 return rcStrict;
3524 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3525 }
3526
3527 /*
3528         * Start committing the register changes (joins with the DPL=CPL branch).
3529 */
3530 pCtx->ss.Sel = NewSS;
3531 pCtx->ss.ValidSel = NewSS;
3532 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3533 pCtx->ss.u32Limit = cbLimitSS;
3534 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3535 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3536 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3537 pIemCpu->uCpl = uNewCpl;
3538
3539 if (fEfl & X86_EFL_VM)
3540 {
3541 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3542 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3543 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3544 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3545 }
3546 }
3547 /*
3548 * Same privilege, no stack change and smaller stack frame.
3549 */
3550 else
3551 {
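        /* Only EIP/IP, CS and EFLAGS (plus an optional error code) are pushed here;
           since there is no stack switch, no SS:ESP pair goes on the frame. */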
3552 uint64_t uNewRsp;
3553 RTPTRUNION uStackFrame;
3554 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3555 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3556 if (rcStrict != VINF_SUCCESS)
3557 return rcStrict;
3558 void * const pvStackFrame = uStackFrame.pv;
3559
3560 if (f32BitGate)
3561 {
3562 if (fFlags & IEM_XCPT_FLAGS_ERR)
3563 *uStackFrame.pu32++ = uErr;
3564 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3565 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3566 uStackFrame.pu32[2] = fEfl;
3567 }
3568 else
3569 {
3570 if (fFlags & IEM_XCPT_FLAGS_ERR)
3571 *uStackFrame.pu16++ = uErr;
3572 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3573 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3574 uStackFrame.pu16[2] = fEfl;
3575 }
3576 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3577 if (rcStrict != VINF_SUCCESS)
3578 return rcStrict;
3579
3580 /* Mark the CS selector as 'accessed'. */
3581 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3582 {
3583 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3584 if (rcStrict != VINF_SUCCESS)
3585 return rcStrict;
3586 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3587 }
3588
3589 /*
3590 * Start committing the register changes (joins with the other branch).
3591 */
3592 pCtx->rsp = uNewRsp;
3593 }
3594
3595 /* ... register committing continues. */
3596 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3597 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3598 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3599 pCtx->cs.u32Limit = cbLimitCS;
3600 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3601 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3602
3603 pCtx->rip = uNewEip;
3604 fEfl &= ~fEflToClear;
3605 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3606
3607 if (fFlags & IEM_XCPT_FLAGS_CR2)
3608 pCtx->cr2 = uCr2;
3609
3610 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3611 iemRaiseXcptAdjustState(pCtx, u8Vector);
3612
3613 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3614}
3615
3616
3617/**
3618 * Implements exceptions and interrupts for long mode.
3619 *
3620 * @returns VBox strict status code.
3621 * @param pIemCpu The IEM per CPU instance data.
3622 * @param pCtx The CPU context.
3623 * @param cbInstr The number of bytes to offset rIP by in the return
3624 * address.
3625 * @param u8Vector The interrupt / exception vector number.
3626 * @param fFlags The flags.
3627 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3628 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3629 */
3630static VBOXSTRICTRC
3631iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3632 PCPUMCTX pCtx,
3633 uint8_t cbInstr,
3634 uint8_t u8Vector,
3635 uint32_t fFlags,
3636 uint16_t uErr,
3637 uint64_t uCr2)
3638{
3639 NOREF(cbInstr);
3640
3641 /*
3642 * Read the IDT entry.
3643 */
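    /* Long-mode IDT entries are 16 bytes, hence the shift by 4 and the two
       8-byte fetches below. */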
3644 uint16_t offIdt = (uint16_t)u8Vector << 4;
3645 if (pCtx->idtr.cbIdt < offIdt + 7)
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3648 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3649 }
3650 X86DESC64 Idte;
3651 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3652 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3653 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3654 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3655 return rcStrict;
3656 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3657 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3658 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3659
3660 /*
3661 * Check the descriptor type, DPL and such.
3662 * ASSUMES this is done in the same order as described for call-gate calls.
3663 */
3664 if (Idte.Gate.u1DescType)
3665 {
3666 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3667 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3668 }
3669 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3670 switch (Idte.Gate.u4Type)
3671 {
3672 case AMD64_SEL_TYPE_SYS_INT_GATE:
3673 fEflToClear |= X86_EFL_IF;
3674 break;
3675 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3676 break;
3677
3678 default:
3679 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3680 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3681 }
3682
3683 /* Check DPL against CPL if applicable. */
3684 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3685 {
3686 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3687 {
3688 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3689 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3690 }
3691 }
3692
3693 /* Is it there? */
3694 if (!Idte.Gate.u1Present)
3695 {
3696 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3697 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3698 }
3699
3700 /* A null CS is bad. */
3701 RTSEL NewCS = Idte.Gate.u16Sel;
3702 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3703 {
3704 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3705 return iemRaiseGeneralProtectionFault0(pIemCpu);
3706 }
3707
3708 /* Fetch the descriptor for the new CS. */
3709 IEMSELDESC DescCS;
3710 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3711 if (rcStrict != VINF_SUCCESS)
3712 {
3713 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3714 return rcStrict;
3715 }
3716
3717 /* Must be a 64-bit code segment. */
3718 if (!DescCS.Long.Gen.u1DescType)
3719 {
3720 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3721 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3722 }
3723 if ( !DescCS.Long.Gen.u1Long
3724 || DescCS.Long.Gen.u1DefBig
3725 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3726 {
3727 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3728 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3729 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3730 }
3731
3732 /* Don't allow lowering the privilege level. For non-conforming CS
3733 selectors, the CS.DPL sets the privilege level the trap/interrupt
3734 handler runs at. For conforming CS selectors, the CPL remains
3735 unchanged, but the CS.DPL must be <= CPL. */
3736 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3737 * when CPU in Ring-0. Result \#GP? */
3738 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3739 {
3740 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3741 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3742 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3743 }
3744
3745
3746 /* Make sure the selector is present. */
3747 if (!DescCS.Legacy.Gen.u1Present)
3748 {
3749 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3750 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3751 }
3752
3753 /* Check that the new RIP is canonical. */
3754 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3755 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3756 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3757 if (!IEM_IS_CANONICAL(uNewRip))
3758 {
3759 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3760 return iemRaiseGeneralProtectionFault0(pIemCpu);
3761 }
3762
3763 /*
3764 * If the privilege level changes or if the IST isn't zero, we need to get
3765 * a new stack from the TSS.
3766 */
3767 uint64_t uNewRsp;
3768 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3769 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3770 if ( uNewCpl != pIemCpu->uCpl
3771 || Idte.Gate.u3IST != 0)
3772 {
3773 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3774 if (rcStrict != VINF_SUCCESS)
3775 return rcStrict;
3776 }
3777 else
3778 uNewRsp = pCtx->rsp;
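    /* In long mode the CPU aligns the new RSP on a 16-byte boundary before
       pushing the interrupt stack frame. */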
3779 uNewRsp &= ~(uint64_t)0xf;
3780
3781 /*
3782 * Calc the flag image to push.
3783 */
3784 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3785 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3786 fEfl &= ~X86_EFL_RF;
3787 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3788 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3789
3790 /*
3791 * Start making changes.
3792 */
3793
3794 /* Create the stack frame. */
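    /* The 64-bit frame is always five qwords - RIP, CS, RFLAGS, RSP and SS -
       plus one more qword for the error code when present. */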
3795 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3796 RTPTRUNION uStackFrame;
3797 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3798 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3799 if (rcStrict != VINF_SUCCESS)
3800 return rcStrict;
3801 void * const pvStackFrame = uStackFrame.pv;
3802
3803 if (fFlags & IEM_XCPT_FLAGS_ERR)
3804 *uStackFrame.pu64++ = uErr;
3805 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3806 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3807 uStackFrame.pu64[2] = fEfl;
3808 uStackFrame.pu64[3] = pCtx->rsp;
3809 uStackFrame.pu64[4] = pCtx->ss.Sel;
3810 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3811 if (rcStrict != VINF_SUCCESS)
3812 return rcStrict;
3813
3814    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3815    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3816 * after pushing the stack frame? (Write protect the gdt + stack to
3817 * find out.) */
3818 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3819 {
3820 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3821 if (rcStrict != VINF_SUCCESS)
3822 return rcStrict;
3823 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3824 }
3825
3826 /*
3827     * Start committing the register changes.
3828 */
3829 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3830 * hidden registers when interrupting 32-bit or 16-bit code! */
3831 if (uNewCpl != pIemCpu->uCpl)
3832 {
3833 pCtx->ss.Sel = 0 | uNewCpl;
3834 pCtx->ss.ValidSel = 0 | uNewCpl;
3835 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3836 pCtx->ss.u32Limit = UINT32_MAX;
3837 pCtx->ss.u64Base = 0;
3838 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3839 }
3840 pCtx->rsp = uNewRsp - cbStackFrame;
3841 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3842 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3843 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3844 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3845 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3846 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3847 pCtx->rip = uNewRip;
3848 pIemCpu->uCpl = uNewCpl;
3849
3850 fEfl &= ~fEflToClear;
3851 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3852
3853 if (fFlags & IEM_XCPT_FLAGS_CR2)
3854 pCtx->cr2 = uCr2;
3855
3856 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3857 iemRaiseXcptAdjustState(pCtx, u8Vector);
3858
3859 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3860}
3861
3862
3863/**
3864 * Implements exceptions and interrupts.
3865 *
3866 * All exceptions and interrupts go through this function!
3867 *
3868 * @returns VBox strict status code.
3869 * @param pIemCpu The IEM per CPU instance data.
3870 * @param cbInstr The number of bytes to offset rIP by in the return
3871 * address.
3872 * @param u8Vector The interrupt / exception vector number.
3873 * @param fFlags The flags.
3874 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3875 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3876 */
3877DECL_NO_INLINE(static, VBOXSTRICTRC)
3878iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3879 uint8_t cbInstr,
3880 uint8_t u8Vector,
3881 uint32_t fFlags,
3882 uint16_t uErr,
3883 uint64_t uCr2)
3884{
3885 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3886
3887 /*
3888 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3889 */
3890 if ( pCtx->eflags.Bits.u1VM
3891 && pCtx->eflags.Bits.u2IOPL != 3
3892 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3893 && (pCtx->cr0 & X86_CR0_PE) )
3894 {
3895 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3896 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3897 u8Vector = X86_XCPT_GP;
3898 uErr = 0;
3899 }
3900#ifdef DBGFTRACE_ENABLED
3901 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3902 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3903 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3904#endif
3905
3906 /*
3907 * Do recursion accounting.
3908 */
3909 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3910 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3911 if (pIemCpu->cXcptRecursions == 0)
3912 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3913 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3914 else
3915 {
3916 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3917 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3918
3919        /** @todo double and triple faults. */
3920 if (pIemCpu->cXcptRecursions >= 3)
3921 {
3922#ifdef DEBUG_bird
3923 AssertFailed();
3924#endif
3925 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3926 }
3927
3928 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3929 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3930 {
3931 ....
3932 } */
3933 }
3934 pIemCpu->cXcptRecursions++;
3935 pIemCpu->uCurXcpt = u8Vector;
3936 pIemCpu->fCurXcpt = fFlags;
3937
3938 /*
3939 * Extensive logging.
3940 */
3941#if defined(LOG_ENABLED) && defined(IN_RING3)
3942 if (LogIs3Enabled())
3943 {
3944 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3945 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3946 char szRegs[4096];
3947 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3948 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3949 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3950 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3951 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3952 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3953 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3954 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3955 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3956 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3957 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3958 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3959 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3960 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3961 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3962 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3963 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3964 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3965 " efer=%016VR{efer}\n"
3966 " pat=%016VR{pat}\n"
3967 " sf_mask=%016VR{sf_mask}\n"
3968 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3969 " lstar=%016VR{lstar}\n"
3970 " star=%016VR{star} cstar=%016VR{cstar}\n"
3971 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3972 );
3973
3974 char szInstr[256];
3975 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3976 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3977 szInstr, sizeof(szInstr), NULL);
3978 Log3(("%s%s\n", szRegs, szInstr));
3979 }
3980#endif /* LOG_ENABLED */
3981
3982 /*
3983 * Call the mode specific worker function.
3984 */
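    /* CR0.PE clear -> real mode; EFER.LMA set -> long mode; otherwise protected
       mode, which also covers V8086 (see the IOPL upgrade above). */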
3985 VBOXSTRICTRC rcStrict;
3986 if (!(pCtx->cr0 & X86_CR0_PE))
3987 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3988 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
3989 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3990 else
3991 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3992
3993 /*
3994 * Unwind.
3995 */
3996 pIemCpu->cXcptRecursions--;
3997 pIemCpu->uCurXcpt = uPrevXcpt;
3998 pIemCpu->fCurXcpt = fPrevXcpt;
3999 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4000 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4001 return rcStrict;
4002}
4003
4004
4005/** \#DE - 00. */
4006DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4007{
4008 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4009}
4010
4011
4012/** \#DB - 01.
4013 * @note This automatically clears DR7.GD. */
4014DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4015{
4016 /** @todo set/clear RF. */
4017 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4018 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4019}
4020
4021
4022/** \#UD - 06. */
4023DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4024{
4025 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4026}
4027
4028
4029/** \#NM - 07. */
4030DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4031{
4032 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4033}
4034
4035
4036/** \#TS(err) - 0a. */
4037DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4038{
4039 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4040}
4041
4042
4043/** \#TS(tr) - 0a. */
4044DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4045{
4046 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4047 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4048}
4049
4050
4051/** \#TS(0) - 0a. */
4052DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4053{
4054 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4055 0, 0);
4056}
4057
4058
4059/** \#TS(err) - 0a. */
4060DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4061{
4062 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4063 uSel & X86_SEL_MASK_OFF_RPL, 0);
4064}
4065
4066
4067/** \#NP(err) - 0b. */
4068DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4069{
4070 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4071}
4072
4073
4074/** \#NP(seg) - 0b. */
4075DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4076{
4077 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4078 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4079}
4080
4081
4082/** \#NP(sel) - 0b. */
4083DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4084{
4085 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4086 uSel & ~X86_SEL_RPL, 0);
4087}
4088
4089
4090/** \#SS(seg) - 0c. */
4091DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4092{
4093 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4094 uSel & ~X86_SEL_RPL, 0);
4095}
4096
4097
4098/** \#SS(err) - 0c. */
4099DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4100{
4101 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4102}
4103
4104
4105/** \#GP(n) - 0d. */
4106DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4107{
4108 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4109}
4110
4111
4112/** \#GP(0) - 0d. */
4113DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4114{
4115 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4116}
4117
4118
4119/** \#GP(sel) - 0d. */
4120DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4121{
4122 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4123 Sel & ~X86_SEL_RPL, 0);
4124}
4125
4126
4127/** \#GP(0) - 0d. */
4128DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4129{
4130 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4131}
4132
4133
4134/** \#GP(sel) - 0d. */
4135DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4136{
4137 NOREF(iSegReg); NOREF(fAccess);
4138 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4139 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4140}
4141
4142
4143/** \#GP(sel) - 0d. */
4144DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4145{
4146 NOREF(Sel);
4147 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4148}
4149
4150
4151/** \#GP(sel) - 0d. */
4152DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4153{
4154 NOREF(iSegReg); NOREF(fAccess);
4155 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4156}
4157
4158
4159/** \#PF(n) - 0e. */
4160DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4161{
4162 uint16_t uErr;
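    /* \#PF error code bits: P (bit 0) = protection violation vs. not-present,
       W/R (bit 1) = write access, U/S (bit 2) = user-mode access,
       I/D (bit 4) = instruction fetch (only with PAE/long mode and EFER.NXE). */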
4163 switch (rc)
4164 {
4165 case VERR_PAGE_NOT_PRESENT:
4166 case VERR_PAGE_TABLE_NOT_PRESENT:
4167 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4168 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4169 uErr = 0;
4170 break;
4171
4172 default:
4173 AssertMsgFailed(("%Rrc\n", rc));
4174 case VERR_ACCESS_DENIED:
4175 uErr = X86_TRAP_PF_P;
4176 break;
4177
4178 /** @todo reserved */
4179 }
4180
4181 if (pIemCpu->uCpl == 3)
4182 uErr |= X86_TRAP_PF_US;
4183
4184 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4185 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4186 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4187 uErr |= X86_TRAP_PF_ID;
4188
4189#if 0 /* This is so much non-sense, really. Why was it done like that? */
4190 /* Note! RW access callers reporting a WRITE protection fault, will clear
4191 the READ flag before calling. So, read-modify-write accesses (RW)
4192 can safely be reported as READ faults. */
4193 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4194 uErr |= X86_TRAP_PF_RW;
4195#else
4196 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4197 {
4198 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4199 uErr |= X86_TRAP_PF_RW;
4200 }
4201#endif
4202
4203 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4204 uErr, GCPtrWhere);
4205}
4206
4207
4208/** \#MF(0) - 10. */
4209DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4210{
4211 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4212}
4213
4214
4215/** \#AC(0) - 11. */
4216DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4217{
4218 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4219}
4220
4221
4222/**
4223 * Macro for calling iemCImplRaiseDivideError().
4224 *
4225 * This enables us to add/remove arguments and force different levels of
4226 * inlining as we wish.
4227 *
4228 * @return Strict VBox status code.
4229 */
4230#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4231IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4232{
4233 NOREF(cbInstr);
4234 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4235}
4236
4237
4238/**
4239 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4240 *
4241 * This enables us to add/remove arguments and force different levels of
4242 * inlining as we wish.
4243 *
4244 * @return Strict VBox status code.
4245 */
4246#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4247IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4248{
4249 NOREF(cbInstr);
4250 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4251}
4252
4253
4254/**
4255 * Macro for calling iemCImplRaiseInvalidOpcode().
4256 *
4257 * This enables us to add/remove arguments and force different levels of
4258 * inlining as we wish.
4259 *
4260 * @return Strict VBox status code.
4261 */
4262#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4263IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4264{
4265 NOREF(cbInstr);
4266 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4267}
4268
4269
4270/** @} */
4271
4272
4273/*
4274 *
4275 * Helper routines.
4276 * Helper routines.
4277 * Helper routines.
4278 *
4279 */
4280
4281/**
4282 * Recalculates the effective operand size.
4283 *
4284 * @param pIemCpu The IEM state.
4285 */
4286static void iemRecalEffOpSize(PIEMCPU pIemCpu)
4287{
4288 switch (pIemCpu->enmCpuMode)
4289 {
4290 case IEMMODE_16BIT:
4291 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4292 break;
4293 case IEMMODE_32BIT:
4294 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4295 break;
4296 case IEMMODE_64BIT:
4297 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4298 {
4299 case 0:
4300 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4301 break;
4302 case IEM_OP_PRF_SIZE_OP:
4303 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4304 break;
4305 case IEM_OP_PRF_SIZE_REX_W:
4306 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4307 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4308 break;
4309 }
4310 break;
4311 default:
4312 AssertFailed();
4313 }
4314}
4315
4316
4317/**
4318 * Sets the default operand size to 64-bit and recalculates the effective
4319 * operand size.
4320 *
4321 * @param pIemCpu The IEM state.
4322 */
4323static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4324{
4325 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4326 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
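    /* REX.W forces 64-bit operand size regardless of an operand-size (0x66)
       prefix; only a lone 0x66 prefix selects 16-bit here. */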
4327 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4328 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4329 else
4330 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4331}
4332
4333
4334/*
4335 *
4336 * Common opcode decoders.
4337 * Common opcode decoders.
4338 * Common opcode decoders.
4339 *
4340 */
4341//#include <iprt/mem.h>
4342
4343/**
4344 * Used to add extra details about a stub case.
4345 * @param pIemCpu The IEM per CPU state.
4346 */
4347static void iemOpStubMsg2(PIEMCPU pIemCpu)
4348{
4349#if defined(LOG_ENABLED) && defined(IN_RING3)
4350 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4351 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4352 char szRegs[4096];
4353 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4354 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4355 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4356 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4357 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4358 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4359 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4360 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4361 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4362 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4363 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4364 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4365 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4366 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4367 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4368 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4369 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4370 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4371 " efer=%016VR{efer}\n"
4372 " pat=%016VR{pat}\n"
4373 " sf_mask=%016VR{sf_mask}\n"
4374 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4375 " lstar=%016VR{lstar}\n"
4376 " star=%016VR{star} cstar=%016VR{cstar}\n"
4377 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4378 );
4379
4380 char szInstr[256];
4381 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4382 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4383 szInstr, sizeof(szInstr), NULL);
4384
4385 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4386#else
4387    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4388#endif
4389}
4390
4391/**
4392 * Complains about a stub.
4393 *
4394 * Providing two versions of this macro, one for daily use and one for use when
4395 * working on IEM.
4396 */
4397#if 0
4398# define IEMOP_BITCH_ABOUT_STUB() \
4399 do { \
4400 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4401 iemOpStubMsg2(pIemCpu); \
4402 RTAssertPanic(); \
4403 } while (0)
4404#else
4405# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4406#endif
4407
4408/** Stubs an opcode. */
4409#define FNIEMOP_STUB(a_Name) \
4410 FNIEMOP_DEF(a_Name) \
4411 { \
4412 IEMOP_BITCH_ABOUT_STUB(); \
4413 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4414 } \
4415 typedef int ignore_semicolon
4416
4417/** Stubs an opcode. */
4418#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4419 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4420 { \
4421 IEMOP_BITCH_ABOUT_STUB(); \
4422 NOREF(a_Name0); \
4423 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4424 } \
4425 typedef int ignore_semicolon
4426
4427/** Stubs an opcode which currently should raise \#UD. */
4428#define FNIEMOP_UD_STUB(a_Name) \
4429 FNIEMOP_DEF(a_Name) \
4430 { \
4431 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4432 return IEMOP_RAISE_INVALID_OPCODE(); \
4433 } \
4434 typedef int ignore_semicolon
4435
4436/** Stubs an opcode which currently should raise \#UD. */
4437#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4438 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4439 { \
4440 NOREF(a_Name0); \
4441 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4442 return IEMOP_RAISE_INVALID_OPCODE(); \
4443 } \
4444 typedef int ignore_semicolon
4445
4446
4447
4448/** @name Register Access.
4449 * @{
4450 */
4451
4452/**
4453 * Gets a reference (pointer) to the specified hidden segment register.
4454 *
4455 * @returns Hidden register reference.
4456 * @param pIemCpu The per CPU data.
4457 * @param iSegReg The segment register.
4458 */
4459static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4460{
4461 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4462 PCPUMSELREG pSReg;
4463 switch (iSegReg)
4464 {
4465 case X86_SREG_ES: pSReg = &pCtx->es; break;
4466 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4467 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4468 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4469 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4470 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4471 default:
4472 AssertFailedReturn(NULL);
4473 }
4474#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4475 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4476 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4477#else
4478 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4479#endif
4480 return pSReg;
4481}
4482
4483
4484/**
4485 * Gets a reference (pointer) to the specified segment register (the selector
4486 * value).
4487 *
4488 * @returns Pointer to the selector variable.
4489 * @param pIemCpu The per CPU data.
4490 * @param iSegReg The segment register.
4491 */
4492static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4493{
4494 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4495 switch (iSegReg)
4496 {
4497 case X86_SREG_ES: return &pCtx->es.Sel;
4498 case X86_SREG_CS: return &pCtx->cs.Sel;
4499 case X86_SREG_SS: return &pCtx->ss.Sel;
4500 case X86_SREG_DS: return &pCtx->ds.Sel;
4501 case X86_SREG_FS: return &pCtx->fs.Sel;
4502 case X86_SREG_GS: return &pCtx->gs.Sel;
4503 }
4504 AssertFailedReturn(NULL);
4505}
4506
4507
4508/**
4509 * Fetches the selector value of a segment register.
4510 *
4511 * @returns The selector value.
4512 * @param pIemCpu The per CPU data.
4513 * @param iSegReg The segment register.
4514 */
4515static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4516{
4517 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4518 switch (iSegReg)
4519 {
4520 case X86_SREG_ES: return pCtx->es.Sel;
4521 case X86_SREG_CS: return pCtx->cs.Sel;
4522 case X86_SREG_SS: return pCtx->ss.Sel;
4523 case X86_SREG_DS: return pCtx->ds.Sel;
4524 case X86_SREG_FS: return pCtx->fs.Sel;
4525 case X86_SREG_GS: return pCtx->gs.Sel;
4526 }
4527 AssertFailedReturn(0xffff);
4528}
4529
4530
4531/**
4532 * Gets a reference (pointer) to the specified general register.
4533 *
4534 * @returns Register reference.
4535 * @param pIemCpu The per CPU data.
4536 * @param iReg The general register.
4537 */
4538static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4539{
4540 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4541 switch (iReg)
4542 {
4543 case X86_GREG_xAX: return &pCtx->rax;
4544 case X86_GREG_xCX: return &pCtx->rcx;
4545 case X86_GREG_xDX: return &pCtx->rdx;
4546 case X86_GREG_xBX: return &pCtx->rbx;
4547 case X86_GREG_xSP: return &pCtx->rsp;
4548 case X86_GREG_xBP: return &pCtx->rbp;
4549 case X86_GREG_xSI: return &pCtx->rsi;
4550 case X86_GREG_xDI: return &pCtx->rdi;
4551 case X86_GREG_x8: return &pCtx->r8;
4552 case X86_GREG_x9: return &pCtx->r9;
4553 case X86_GREG_x10: return &pCtx->r10;
4554 case X86_GREG_x11: return &pCtx->r11;
4555 case X86_GREG_x12: return &pCtx->r12;
4556 case X86_GREG_x13: return &pCtx->r13;
4557 case X86_GREG_x14: return &pCtx->r14;
4558 case X86_GREG_x15: return &pCtx->r15;
4559 }
4560 AssertFailedReturn(NULL);
4561}
4562
4563
4564/**
4565 * Gets a reference (pointer) to the specified 8-bit general register.
4566 *
4567 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4568 *
4569 * @returns Register reference.
4570 * @param pIemCpu The per CPU data.
4571 * @param iReg The register.
4572 */
4573static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4574{
4575 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4576 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4577
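    /* Without a REX prefix, register indices 4 thru 7 address the legacy high
       byte registers AH, CH, DH and BH, i.e. byte 1 of RAX, RCX, RDX and RBX. */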
4578 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4579 if (iReg >= 4)
4580 pu8Reg++;
4581 return pu8Reg;
4582}
4583
4584
4585/**
4586 * Fetches the value of an 8-bit general register.
4587 *
4588 * @returns The register value.
4589 * @param pIemCpu The per CPU data.
4590 * @param iReg The register.
4591 */
4592static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4593{
4594 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4595 return *pbSrc;
4596}
4597
4598
4599/**
4600 * Fetches the value of a 16-bit general register.
4601 *
4602 * @returns The register value.
4603 * @param pIemCpu The per CPU data.
4604 * @param iReg The register.
4605 */
4606static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4607{
4608 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4609}
4610
4611
4612/**
4613 * Fetches the value of a 32-bit general register.
4614 *
4615 * @returns The register value.
4616 * @param pIemCpu The per CPU data.
4617 * @param iReg The register.
4618 */
4619static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4620{
4621 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4622}
4623
4624
4625/**
4626 * Fetches the value of a 64-bit general register.
4627 *
4628 * @returns The register value.
4629 * @param pIemCpu The per CPU data.
4630 * @param iReg The register.
4631 */
4632static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4633{
4634 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4635}
4636
4637
4638/**
4639 * Checks whether the FPU state is in FXSAVE format.
4640 *
4641 * @returns true if it is, false if it's in FNSAVE format.
4642 * @param pIemCpu The IEM per CPU data.
4643 */
4644DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
4645{
4646#ifdef RT_ARCH_AMD64
4647 NOREF(pIemCpu);
4648 return true;
4649#else
4650 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
4651 return true;
4652#endif
4653}
4654
4655
4656/**
4657 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4658 *
4659 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4660 * segment limit.
4661 *
4662 * @param pIemCpu The per CPU data.
4663 * @param offNextInstr The offset of the next instruction.
4664 */
4665static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4666{
4667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4668 switch (pIemCpu->enmEffOpSize)
4669 {
4670 case IEMMODE_16BIT:
4671 {
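            /* offOpcode holds the number of opcode bytes fetched for this
               instruction (its length at this point), so this yields the IP of
               the next instruction plus the relative displacement. */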
4672 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4673 if ( uNewIp > pCtx->cs.u32Limit
4674 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4675 return iemRaiseGeneralProtectionFault0(pIemCpu);
4676 pCtx->rip = uNewIp;
4677 break;
4678 }
4679
4680 case IEMMODE_32BIT:
4681 {
4682 Assert(pCtx->rip <= UINT32_MAX);
4683 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4684
4685 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4686 if (uNewEip > pCtx->cs.u32Limit)
4687 return iemRaiseGeneralProtectionFault0(pIemCpu);
4688 pCtx->rip = uNewEip;
4689 break;
4690 }
4691
4692 case IEMMODE_64BIT:
4693 {
4694 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4695
4696 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4697 if (!IEM_IS_CANONICAL(uNewRip))
4698 return iemRaiseGeneralProtectionFault0(pIemCpu);
4699 pCtx->rip = uNewRip;
4700 break;
4701 }
4702
4703 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4704 }
4705
4706 pCtx->eflags.Bits.u1RF = 0;
4707 return VINF_SUCCESS;
4708}
4709
4710
4711/**
4712 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4713 *
4714 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4715 * segment limit.
4716 *
4717 * @returns Strict VBox status code.
4718 * @param pIemCpu The per CPU data.
4719 * @param offNextInstr The offset of the next instruction.
4720 */
4721static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4722{
4723 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4724 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4725
4726 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4727 if ( uNewIp > pCtx->cs.u32Limit
4728 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4729 return iemRaiseGeneralProtectionFault0(pIemCpu);
4730 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4731 pCtx->rip = uNewIp;
4732 pCtx->eflags.Bits.u1RF = 0;
4733
4734 return VINF_SUCCESS;
4735}
4736
4737
4738/**
4739 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4740 *
4741 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4742 * segment limit.
4743 *
4744 * @returns Strict VBox status code.
4745 * @param pIemCpu The per CPU data.
4746 * @param offNextInstr The offset of the next instruction.
4747 */
4748static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4749{
4750 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4751 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4752
4753 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4754 {
4755 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4756
4757 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4758 if (uNewEip > pCtx->cs.u32Limit)
4759 return iemRaiseGeneralProtectionFault0(pIemCpu);
4760 pCtx->rip = uNewEip;
4761 }
4762 else
4763 {
4764 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4765
4766 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4767 if (!IEM_IS_CANONICAL(uNewRip))
4768 return iemRaiseGeneralProtectionFault0(pIemCpu);
4769 pCtx->rip = uNewRip;
4770 }
4771 pCtx->eflags.Bits.u1RF = 0;
4772 return VINF_SUCCESS;
4773}
4774
4775
4776/**
4777 * Performs a near jump to the specified address.
4778 *
4779 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4780 * segment limit.
4781 *
4782 * @param pIemCpu The per CPU data.
4783 * @param uNewRip The new RIP value.
4784 */
4785static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4786{
4787 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4788 switch (pIemCpu->enmEffOpSize)
4789 {
4790 case IEMMODE_16BIT:
4791 {
4792 Assert(uNewRip <= UINT16_MAX);
4793 if ( uNewRip > pCtx->cs.u32Limit
4794 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4795 return iemRaiseGeneralProtectionFault0(pIemCpu);
4796 /** @todo Test 16-bit jump in 64-bit mode. */
4797 pCtx->rip = uNewRip;
4798 break;
4799 }
4800
4801 case IEMMODE_32BIT:
4802 {
4803 Assert(uNewRip <= UINT32_MAX);
4804 Assert(pCtx->rip <= UINT32_MAX);
4805 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4806
4807 if (uNewRip > pCtx->cs.u32Limit)
4808 return iemRaiseGeneralProtectionFault0(pIemCpu);
4809 pCtx->rip = uNewRip;
4810 break;
4811 }
4812
4813 case IEMMODE_64BIT:
4814 {
4815 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4816
4817 if (!IEM_IS_CANONICAL(uNewRip))
4818 return iemRaiseGeneralProtectionFault0(pIemCpu);
4819 pCtx->rip = uNewRip;
4820 break;
4821 }
4822
4823 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4824 }
4825
4826 pCtx->eflags.Bits.u1RF = 0;
4827 return VINF_SUCCESS;
4828}
4829
4830
4831/**
4832 * Get the address of the top of the stack.
4833 *
4834 * @param pIemCpu The per CPU data.
4835 * @param pCtx The CPU context from which SP/ESP/RSP should be
4836 * read.
4837 */
4838DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4839{
4840 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4841 return pCtx->rsp;
4842 if (pCtx->ss.Attr.n.u1DefBig)
4843 return pCtx->esp;
4844 return pCtx->sp;
4845}
4846
4847
4848/**
4849 * Updates the RIP/EIP/IP to point to the next instruction.
4850 *
4851 * This function leaves the EFLAGS.RF flag alone.
4852 *
4853 * @param pIemCpu The per CPU data.
4854 * @param cbInstr The number of bytes to add.
4855 */
4856static void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4857{
4858 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4859 switch (pIemCpu->enmCpuMode)
4860 {
4861 case IEMMODE_16BIT:
4862 Assert(pCtx->rip <= UINT16_MAX);
4863 pCtx->eip += cbInstr;
4864 pCtx->eip &= UINT32_C(0xffff);
4865 break;
4866
4867 case IEMMODE_32BIT:
4868 pCtx->eip += cbInstr;
4869 Assert(pCtx->rip <= UINT32_MAX);
4870 break;
4871
4872 case IEMMODE_64BIT:
4873 pCtx->rip += cbInstr;
4874 break;
4875 default: AssertFailed();
4876 }
4877}
4878
4879
4880#if 0
4881/**
4882 * Updates the RIP/EIP/IP to point to the next instruction.
4883 *
4884 * @param pIemCpu The per CPU data.
4885 */
4886static void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4887{
4888 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4889}
4890#endif
4891
4892
4893
4894/**
4895 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4896 *
4897 * @param pIemCpu The per CPU data.
4898 * @param cbInstr The number of bytes to add.
4899 */
4900static void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4901{
4902 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4903
4904 pCtx->eflags.Bits.u1RF = 0;
4905
4906 switch (pIemCpu->enmCpuMode)
4907 {
4908 case IEMMODE_16BIT:
4909 Assert(pCtx->rip <= UINT16_MAX);
4910 pCtx->eip += cbInstr;
4911 pCtx->eip &= UINT32_C(0xffff);
4912 break;
4913
4914 case IEMMODE_32BIT:
4915 pCtx->eip += cbInstr;
4916 Assert(pCtx->rip <= UINT32_MAX);
4917 break;
4918
4919 case IEMMODE_64BIT:
4920 pCtx->rip += cbInstr;
4921 break;
4922 default: AssertFailed();
4923 }
4924}
4925
4926
4927/**
4928 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4929 *
4930 * @param pIemCpu The per CPU data.
4931 */
4932static void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4933{
4934 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4935}
4936
4937
4938/**
4939 * Adds to the stack pointer.
4940 *
4941 * @param pIemCpu The per CPU data.
4942 * @param pCtx The CPU context in which SP/ESP/RSP should be
4943 * updated.
4944 * @param cbToAdd The number of bytes to add.
4945 */
4946DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4947{
4948 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4949 pCtx->rsp += cbToAdd;
4950 else if (pCtx->ss.Attr.n.u1DefBig)
4951 pCtx->esp += cbToAdd;
4952 else
4953 pCtx->sp += cbToAdd;
4954}
4955
4956
4957/**
4958 * Subtracts from the stack pointer.
4959 *
4960 * @param pIemCpu The per CPU data.
4961 * @param pCtx The CPU context in which SP/ESP/RSP should be
4962 * updated.
4963 * @param cbToSub The number of bytes to subtract.
4964 */
4965DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4966{
4967 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4968 pCtx->rsp -= cbToSub;
4969 else if (pCtx->ss.Attr.n.u1DefBig)
4970 pCtx->esp -= cbToSub;
4971 else
4972 pCtx->sp -= cbToSub;
4973}
4974
4975
4976/**
4977 * Adds to the temporary stack pointer.
4978 *
4979 * @param pIemCpu The per CPU data.
4980 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4981 * @param cbToAdd The number of bytes to add.
4982 * @param pCtx Where to get the current stack mode.
4983 */
4984DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4985{
4986 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4987 pTmpRsp->u += cbToAdd;
4988 else if (pCtx->ss.Attr.n.u1DefBig)
4989 pTmpRsp->DWords.dw0 += cbToAdd;
4990 else
4991 pTmpRsp->Words.w0 += cbToAdd;
4992}
4993
4994
4995/**
4996 * Subtracts from the temporary stack pointer.
4997 *
4998 * @param pIemCpu The per CPU data.
4999 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5000 * @param cbToSub The number of bytes to subtract.
5001 * @param pCtx Where to get the current stack mode.
5002 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5003 * expecting that.
5004 */
5005DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5006{
5007 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5008 pTmpRsp->u -= cbToSub;
5009 else if (pCtx->ss.Attr.n.u1DefBig)
5010 pTmpRsp->DWords.dw0 -= cbToSub;
5011 else
5012 pTmpRsp->Words.w0 -= cbToSub;
5013}
5014
5015
5016/**
5017 * Calculates the effective stack address for a push of the specified size as
5018 * well as the new RSP value (upper bits may be masked).
5019 *
5020 * @returns Effective stack address for the push.
5021 * @param pIemCpu The IEM per CPU data.
5022 * @param pCtx Where to get the current stack mode.
5023 * @param cbItem The size of the stack item to push.
5024 * @param puNewRsp Where to return the new RSP value.
5025 */
5026DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5027{
5028 RTUINT64U uTmpRsp;
5029 RTGCPTR GCPtrTop;
5030 uTmpRsp.u = pCtx->rsp;
5031
5032 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5033 GCPtrTop = uTmpRsp.u -= cbItem;
5034 else if (pCtx->ss.Attr.n.u1DefBig)
5035 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5036 else
5037 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5038 *puNewRsp = uTmpRsp.u;
5039 return GCPtrTop;
5040}
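
/*
 * Illustrative sketch (editorial addition, not IEM code): how the push address
 * and the new RSP value are derived for the three stack widths.  The structure
 * and function names below are made up for the example.
 */
#include <stdint.h>
#include <stdbool.h>

typedef struct DEMOSTACKSTATE { uint64_t rsp; bool f64Bit; bool fSsBig; } DEMOSTACKSTATE;

static uint64_t demoRspForPush(DEMOSTACKSTATE const *pState, uint8_t cbItem, uint64_t *puNewRsp)
{
    if (pState->f64Bit)
    {   /* Flat 64-bit stack: the full RSP moves. */
        *puNewRsp = pState->rsp - cbItem;
        return *puNewRsp;
    }
    if (pState->fSsBig)
    {   /* 32-bit stack: only ESP moves; bits 32..63 of RSP are preserved. */
        uint32_t const uNewEsp = (uint32_t)pState->rsp - cbItem;
        *puNewRsp = (pState->rsp & UINT64_C(0xffffffff00000000)) | uNewEsp;
        return uNewEsp;
    }
    /* 16-bit stack: only SP moves; bits 16..63 of RSP are preserved. */
    uint16_t const uNewSp = (uint16_t)(pState->rsp - cbItem);
    *puNewRsp = (pState->rsp & ~UINT64_C(0xffff)) | uNewSp;
    return uNewSp;
}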
5041
5042
5043/**
5044 * Gets the current stack pointer and calculates the value after a pop of the
5045 * specified size.
5046 *
5047 * @returns Current stack pointer.
5048 * @param pIemCpu The per CPU data.
5049 * @param pCtx Where to get the current stack mode.
5050 * @param cbItem The size of the stack item to pop.
5051 * @param puNewRsp Where to return the new RSP value.
5052 */
5053DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5054{
5055 RTUINT64U uTmpRsp;
5056 RTGCPTR GCPtrTop;
5057 uTmpRsp.u = pCtx->rsp;
5058
5059 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5060 {
5061 GCPtrTop = uTmpRsp.u;
5062 uTmpRsp.u += cbItem;
5063 }
5064 else if (pCtx->ss.Attr.n.u1DefBig)
5065 {
5066 GCPtrTop = uTmpRsp.DWords.dw0;
5067 uTmpRsp.DWords.dw0 += cbItem;
5068 }
5069 else
5070 {
5071 GCPtrTop = uTmpRsp.Words.w0;
5072 uTmpRsp.Words.w0 += cbItem;
5073 }
5074 *puNewRsp = uTmpRsp.u;
5075 return GCPtrTop;
5076}
5077
5078
5079/**
5080 * Calculates the effective stack address for a push of the specified size as
5081 * well as the new temporary RSP value (upper bits may be masked).
5082 *
5083 * @returns Effective stack address for the push.
5084 * @param pIemCpu The per CPU data.
5085 * @param pCtx Where to get the current stack mode.
5086 * @param pTmpRsp The temporary stack pointer. This is updated.
5087 * @param cbItem The size of the stack item to push.
5088 */
5089DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5090{
5091 RTGCPTR GCPtrTop;
5092
5093 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5094 GCPtrTop = pTmpRsp->u -= cbItem;
5095 else if (pCtx->ss.Attr.n.u1DefBig)
5096 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5097 else
5098 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5099 return GCPtrTop;
5100}
5101
5102
5103/**
5104 * Gets the effective stack address for a pop of the specified size and
5105 * calculates and updates the temporary RSP.
5106 *
5107 * @returns Current stack pointer.
5108 * @param pIemCpu The per CPU data.
5109 * @param pTmpRsp The temporary stack pointer. This is updated.
5110 * @param pCtx Where to get the current stack mode.
5111 * @param cbItem The size of the stack item to pop.
5112 */
5113DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5114{
5115 RTGCPTR GCPtrTop;
5116 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5117 {
5118 GCPtrTop = pTmpRsp->u;
5119 pTmpRsp->u += cbItem;
5120 }
5121 else if (pCtx->ss.Attr.n.u1DefBig)
5122 {
5123 GCPtrTop = pTmpRsp->DWords.dw0;
5124 pTmpRsp->DWords.dw0 += cbItem;
5125 }
5126 else
5127 {
5128 GCPtrTop = pTmpRsp->Words.w0;
5129 pTmpRsp->Words.w0 += cbItem;
5130 }
5131 return GCPtrTop;
5132}
5133
5134
5135/**
5136 * Checks if an Intel CPUID feature bit is set.
5137 *
5138 * @returns true / false.
5139 *
5140 * @param pIemCpu The IEM per CPU data.
5141 * @param fEdx The EDX bit to test, or 0 if ECX.
5142 * @param fEcx The ECX bit to test, or 0 if EDX.
5143 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
5144 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
5145 */
5146static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
5147{
5148 uint32_t uEax, uEbx, uEcx, uEdx;
5149 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
5150 return (fEcx && (uEcx & fEcx))
5151 || (fEdx && (uEdx & fEdx));
5152}
5153
5154
5155/**
5156 * Checks if an AMD CPUID feature bit is set.
5157 *
5158 * @returns true / false.
5159 *
5160 * @param pIemCpu The IEM per CPU data.
5161 * @param fEdx The EDX bit to test, or 0 if ECX.
5162 * @param fEcx The ECX bit to test, or 0 if EDX.
5163 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
5164 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
5165 */
5166static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
5167{
5168 uint32_t uEax, uEbx, uEcx, uEdx;
5169 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
5170 return (fEcx && (uEcx & fEcx))
5171 || (fEdx && (uEdx & fEdx));
5172}
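
/*
 * Usage sketch (editorial addition): a decoder would typically test a feature
 * bit before accepting an instruction form.  Hypothetical call site, using the
 * raw helper rather than the IEM_IS_*_CPUID_FEATURE_PRESENT_* wrapper macros:
 *
 *     if (!iemRegIsIntelCpuIdFeaturePresent(pIemCpu, X86_CPUID_FEATURE_EDX_SSE2, 0))
 *         return iemRaiseUndefinedOpcode(pIemCpu);
 */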
5173
5174/** @} */
5175
5176
5177/** @name FPU access and helpers.
5178 *
5179 * @{
5180 */
5181
5182
5183/**
5184 * Hook for preparing to use the host FPU.
5185 *
5186 * This is necessary in ring-0 and raw-mode context.
5187 *
5188 * @param pIemCpu The IEM per CPU data.
5189 */
5190DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5191{
5192#ifdef IN_RING3
5193 NOREF(pIemCpu);
5194#else
5195/** @todo RZ: FIXME */
5196//# error "Implement me"
5197#endif
5198}
5199
5200
5201/**
5202 * Hook for preparing to use the host FPU for SSE.
5203 *
5204 * This is necessary in ring-0 and raw-mode context.
5205 *
5206 * @param pIemCpu The IEM per CPU data.
5207 */
5208DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5209{
5210 iemFpuPrepareUsage(pIemCpu);
5211}
5212
5213
5214/**
5215 * Stores a QNaN value into a FPU register.
5216 *
5217 * @param pReg Pointer to the register.
5218 */
5219DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5220{
5221 pReg->au32[0] = UINT32_C(0x00000000);
5222 pReg->au32[1] = UINT32_C(0xc0000000);
5223 pReg->au16[4] = UINT16_C(0xffff);
5224}
5225
5226
5227/**
5228 * Updates the FOP, FPU.CS and FPUIP registers.
5229 *
5230 * @param pIemCpu The IEM per CPU data.
5231 * @param pCtx The CPU context.
5232 */
5233DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
5234{
5235 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5236 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5237 /** @todo FPU.CS and FPUIP need to be kept separately. */
5238 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5239 {
5240 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5241 * happens in real mode here based on the fnsave and fnstenv images. */
5242 pCtx->fpu.CS = 0;
5243 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5244 }
5245 else
5246 {
5247 pCtx->fpu.CS = pCtx->cs.Sel;
5248 pCtx->fpu.FPUIP = pCtx->rip;
5249 }
5250}
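
/*
 * Illustrative sketch (editorial addition, not IEM code): the FOP value built
 * above is the 11-bit x87 opcode, i.e. the low three bits of the escape byte
 * (D8h..DFh) followed by the modR/M byte.  Standalone helper with made-up
 * names:
 */
#include <stdint.h>

static uint16_t demoCalcFop(uint8_t bEscape /* 0xd8..0xdf */, uint8_t bModRm)
{
    return (uint16_t)(((uint16_t)(bEscape & 0x7) << 8) | bModRm);
}
/* Example: FADD ST(0),ST(1) is encoded as D8 C1 and yields FOP = 0x0c1. */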
5251
5252
5253/**
5254 * Updates the FPU.DS and FPUDP registers.
5255 *
5256 * @param pIemCpu The IEM per CPU data.
5257 * @param pCtx The CPU context.
5258 * @param iEffSeg The effective segment register.
5259 * @param GCPtrEff The effective address relative to @a iEffSeg.
5260 */
5261DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5262{
5263 RTSEL sel;
5264 switch (iEffSeg)
5265 {
5266 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5267 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5268 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5269 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5270 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5271 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5272 default:
5273 AssertMsgFailed(("%d\n", iEffSeg));
5274 sel = pCtx->ds.Sel;
5275 }
5276 /** @todo FPU.DS and FPUDP need to be kept separately. */
5277 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5278 {
5279 pCtx->fpu.DS = 0;
5280 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5281 }
5282 else
5283 {
5284 pCtx->fpu.DS = sel;
5285 pCtx->fpu.FPUDP = GCPtrEff;
5286 }
5287}
5288
5289
5290/**
5291 * Rotates the stack registers in the push direction.
5292 *
5293 * @param pCtx The CPU context.
5294 * @remarks This is a complete waste of time, but fxsave stores the registers in
5295 * stack order.
5296 */
5297DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
5298{
5299 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
5300 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
5301 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
5302 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
5303 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
5304 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
5305 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
5306 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
5307 pCtx->fpu.aRegs[0].r80 = r80Tmp;
5308}
5309
5310
5311/**
5312 * Rotates the stack registers in the pop direction.
5313 *
5314 * @param pCtx The CPU context.
5315 * @remarks This is a complete waste of time, but fxsave stores the registers in
5316 * stack order.
5317 */
5318DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
5319{
5320 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
5321 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
5322 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
5323 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
5324 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
5325 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
5326 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
5327 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
5328 pCtx->fpu.aRegs[7].r80 = r80Tmp;
5329}
5330
5331
5332/**
5333 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5334 * exception prevents it.
5335 *
5336 * @param pIemCpu The IEM per CPU data.
5337 * @param pResult The FPU operation result to push.
5338 * @param pCtx The CPU context.
5339 */
5340static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
5341{
5342 /* Update FSW and bail if there are pending exceptions afterwards. */
5343 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
5344 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5345 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5346 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5347 {
5348 pCtx->fpu.FSW = fFsw;
5349 return;
5350 }
5351
5352 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5353 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
5354 {
5355 /* All is fine, push the actual value. */
5356 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5357 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
5358 }
5359 else if (pCtx->fpu.FCW & X86_FCW_IM)
5360 {
5361 /* Masked stack overflow, push QNaN. */
5362 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5363 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5364 }
5365 else
5366 {
5367 /* Raise stack overflow, don't push anything. */
5368 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5369 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5370 return;
5371 }
5372
5373 fFsw &= ~X86_FSW_TOP_MASK;
5374 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5375 pCtx->fpu.FSW = fFsw;
5376
5377 iemFpuRotateStackPush(pCtx);
5378}
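
/*
 * Illustrative sketch (editorial addition, not IEM code): the "pending
 * exception" test used above and in several helpers below checks whether any
 * of the IE, DE or ZE flags is set in FSW while the corresponding mask bit
 * (IM, DM, ZM) in FCW is clear.  Standalone version with plain constants:
 */
#include <stdint.h>
#include <stdbool.h>

static bool demoFpuHasUnmaskedXcpt(uint16_t fFsw, uint16_t fFcw)
{
    /* IE = bit 0, DE = bit 1, ZE = bit 2 in FSW; IM/DM/ZM mirror them in FCW. */
    uint16_t const fXcpt = UINT16_C(0x0007);
    return ((fFsw & fXcpt) & ~(fFcw & fXcpt)) != 0;
}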
5379
5380
5381/**
5382 * Stores a result in a FPU register and updates the FSW and FTW.
5383 *
5384 * @param pIemCpu The IEM per CPU data.
5385 * @param pResult The result to store.
5386 * @param iStReg Which FPU register to store it in.
5387 * @param pCtx The CPU context.
5388 */
5389static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
5390{
5391 Assert(iStReg < 8);
5392 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5393 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5394 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5395 pCtx->fpu.FTW |= RT_BIT(iReg);
5396 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
5397}
5398
5399
5400/**
5401 * Only updates the FPU status word (FSW) with the result of the current
5402 * instruction.
5403 *
5404 * @param pCtx The CPU context.
5405 * @param u16FSW The FSW output of the current instruction.
5406 */
5407static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
5408{
5409 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5410 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5411}
5412
5413
5414/**
5415 * Pops one item off the FPU stack if no pending exception prevents it.
5416 *
5417 * @param pCtx The CPU context.
5418 */
5419static void iemFpuMaybePopOne(PCPUMCTX pCtx)
5420{
5421 /* Check pending exceptions. */
5422 uint16_t uFSW = pCtx->fpu.FSW;
5423 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5424 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5425 return;
5426
5427 /* Pop one entry: TOP is incremented (modulo 8). */
5428 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5429 uFSW &= ~X86_FSW_TOP_MASK;
5430 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5431 pCtx->fpu.FSW = uFSW;
5432
5433 /* Mark the previous ST0 as empty. */
5434 iOldTop >>= X86_FSW_TOP_SHIFT;
5435 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
5436
5437 /* Rotate the registers. */
5438 iemFpuRotateStackPop(pCtx);
5439}
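
/*
 * Illustrative sketch (editorial addition, not IEM code): TOP is the 3-bit
 * field at bits 11..13 of FSW, so adding 9 << 11 above and masking with the
 * TOP mask is the same as incrementing TOP modulo 8.  Standalone equivalent
 * that advances TOP by an arbitrary number of entries:
 */
#include <stdint.h>

static uint16_t demoFswAddToTop(uint16_t uFsw, unsigned cEntries)
{
    unsigned const cTopShift = 11;                          /* X86_FSW_TOP_SHIFT */
    uint16_t const fTopMask  = (uint16_t)(7u << cTopShift); /* X86_FSW_TOP_MASK  */
    unsigned const uNewTop   = ((uFsw >> cTopShift) + cEntries) & 7u;
    return (uint16_t)((uFsw & ~fTopMask) | (uint16_t)(uNewTop << cTopShift));
}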
5440
5441
5442/**
5443 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5444 *
5445 * @param pIemCpu The IEM per CPU data.
5446 * @param pResult The FPU operation result to push.
5447 */
5448static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5449{
5450 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5451 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5452 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
5453}
5454
5455
5456/**
5457 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5458 * and sets FPUDP and FPUDS.
5459 *
5460 * @param pIemCpu The IEM per CPU data.
5461 * @param pResult The FPU operation result to push.
5462 * @param iEffSeg The effective segment register.
5463 * @param GCPtrEff The effective address relative to @a iEffSeg.
5464 */
5465static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5466{
5467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5468 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5469 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5470 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
5471}
5472
5473
5474/**
5475 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5476 * unless a pending exception prevents it.
5477 *
5478 * @param pIemCpu The IEM per CPU data.
5479 * @param pResult The FPU operation result to store and push.
5480 */
5481static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5482{
5483 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5484 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5485
5486 /* Update FSW and bail if there are pending exceptions afterwards. */
5487 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
5488 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5489 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5490 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5491 {
5492 pCtx->fpu.FSW = fFsw;
5493 return;
5494 }
5495
5496 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5497 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
5498 {
5499 /* All is fine, push the actual value. */
5500 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5501 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
5502 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
5503 }
5504 else if (pCtx->fpu.FCW & X86_FCW_IM)
5505 {
5506 /* Masked stack overflow, push QNaN. */
5507 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5508 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
5509 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5510 }
5511 else
5512 {
5513 /* Raise stack overflow, don't push anything. */
5514 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5515 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5516 return;
5517 }
5518
5519 fFsw &= ~X86_FSW_TOP_MASK;
5520 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5521 pCtx->fpu.FSW = fFsw;
5522
5523 iemFpuRotateStackPush(pCtx);
5524}
5525
5526
5527/**
5528 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5529 * FOP.
5530 *
5531 * @param pIemCpu The IEM per CPU data.
5532 * @param pResult The result to store.
5533 * @param iStReg Which FPU register to store it in.
5535 */
5536static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5537{
5538 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5539 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5540 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5541}
5542
5543
5544/**
5545 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5546 * FOP, and then pops the stack.
5547 *
5548 * @param pIemCpu The IEM per CPU data.
5549 * @param pResult The result to store.
5550 * @param iStReg Which FPU register to store it in.
5552 */
5553static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5554{
5555 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5556 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5557 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5558 iemFpuMaybePopOne(pCtx);
5559}
5560
5561
5562/**
5563 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5564 * FPUDP, and FPUDS.
5565 *
5566 * @param pIemCpu The IEM per CPU data.
5567 * @param pResult The result to store.
5568 * @param iStReg Which FPU register to store it in.
5570 * @param iEffSeg The effective memory operand selector register.
5571 * @param GCPtrEff The effective memory operand offset.
5572 */
5573static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5574{
5575 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5576 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5577 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5578 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5579}
5580
5581
5582/**
5583 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5584 * FPUDP, and FPUDS, and then pops the stack.
5585 *
5586 * @param pIemCpu The IEM per CPU data.
5587 * @param pResult The result to store.
5588 * @param iStReg Which FPU register to store it in.
5590 * @param iEffSeg The effective memory operand selector register.
5591 * @param GCPtrEff The effective memory operand offset.
5592 */
5593static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5594 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5595{
5596 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5597 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5598 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5599 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5600 iemFpuMaybePopOne(pCtx);
5601}
5602
5603
5604/**
5605 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5606 *
5607 * @param pIemCpu The IEM per CPU data.
5608 */
5609static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5610{
5611 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
5612}
5613
5614
5615/**
5616 * Marks the specified stack register as free (for FFREE).
5617 *
5618 * @param pIemCpu The IEM per CPU data.
5619 * @param iStReg The register to free.
5620 */
5621static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5622{
5623 Assert(iStReg < 8);
5624 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5625 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5626 pCtx->fpu.FTW &= ~RT_BIT(iReg);
5627}
5628
5629
5630/**
5631 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5632 *
5633 * @param pIemCpu The IEM per CPU data.
5634 */
5635static void iemFpuStackIncTop(PIEMCPU pIemCpu)
5636{
5637 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5638 uint16_t uFsw = pCtx->fpu.FSW;
5639 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5640 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5641 uFsw &= ~X86_FSW_TOP_MASK;
5642 uFsw |= uTop;
5643 pCtx->fpu.FSW = uFsw;
5644}
5645
5646
5647/**
5648 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5649 *
5650 * @param pIemCpu The IEM per CPU data.
5651 */
5652static void iemFpuStackDecTop(PIEMCPU pIemCpu)
5653{
5654 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5655 uint16_t uFsw = pCtx->fpu.FSW;
5656 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5657 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5658 uFsw &= ~X86_FSW_TOP_MASK;
5659 uFsw |= uTop;
5660 pCtx->fpu.FSW = uFsw;
5661}
5662
5663
5664/**
5665 * Updates the FSW, FOP, FPUIP, and FPUCS.
5666 *
5667 * @param pIemCpu The IEM per CPU data.
5668 * @param u16FSW The FSW from the current instruction.
5669 */
5670static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5671{
5672 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5673 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5674 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5675}
5676
5677
5678/**
5679 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5680 *
5681 * @param pIemCpu The IEM per CPU data.
5682 * @param u16FSW The FSW from the current instruction.
5683 */
5684static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5685{
5686 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5687 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5688 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5689 iemFpuMaybePopOne(pCtx);
5690}
5691
5692
5693/**
5694 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5695 *
5696 * @param pIemCpu The IEM per CPU data.
5697 * @param u16FSW The FSW from the current instruction.
5698 * @param iEffSeg The effective memory operand selector register.
5699 * @param GCPtrEff The effective memory operand offset.
5700 */
5701static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5702{
5703 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5704 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5705 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5706 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5707}
5708
5709
5710/**
5711 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5712 *
5713 * @param pIemCpu The IEM per CPU data.
5714 * @param u16FSW The FSW from the current instruction.
5715 */
5716static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5717{
5718 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5719 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5720 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5721 iemFpuMaybePopOne(pCtx);
5722 iemFpuMaybePopOne(pCtx);
5723}
5724
5725
5726/**
5727 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5728 *
5729 * @param pIemCpu The IEM per CPU data.
5730 * @param u16FSW The FSW from the current instruction.
5731 * @param iEffSeg The effective memory operand selector register.
5732 * @param GCPtrEff The effective memory operand offset.
5733 */
5734static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5735{
5736 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5737 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5738 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5739 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5740 iemFpuMaybePopOne(pCtx);
5741}
5742
5743
5744/**
5745 * Worker routine for raising an FPU stack underflow exception.
5746 *
5747 * @param pIemCpu The IEM per CPU data.
5748 * @param iStReg The stack register being accessed.
5749 * @param pCtx The CPU context.
5750 */
5751static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
5752{
5753 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5754 if (pCtx->fpu.FCW & X86_FCW_IM)
5755 {
5756 /* Masked underflow. */
5757 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5758 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5759 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5760 if (iStReg != UINT8_MAX)
5761 {
5762 pCtx->fpu.FTW |= RT_BIT(iReg);
5763 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
5764 }
5765 }
5766 else
5767 {
5768 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5769 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5770 }
5771}
5772
5773
5774/**
5775 * Raises a FPU stack underflow exception.
5776 *
5777 * @param pIemCpu The IEM per CPU data.
5778 * @param iStReg The destination register that should be loaded
5779 * with QNaN if \#IS is not masked. Specify
5780 * UINT8_MAX if none (like for fcom).
5781 */
5782DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5783{
5784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5785 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5786 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5787}
5788
5789
5790DECL_NO_INLINE(static, void)
5791iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5792{
5793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5794 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5795 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5796 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5797}
5798
5799
5800DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5801{
5802 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5803 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5804 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5805 iemFpuMaybePopOne(pCtx);
5806}
5807
5808
5809DECL_NO_INLINE(static, void)
5810iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5811{
5812 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5813 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5814 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5815 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5816 iemFpuMaybePopOne(pCtx);
5817}
5818
5819
5820DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5821{
5822 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5823 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5824 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
5825 iemFpuMaybePopOne(pCtx);
5826 iemFpuMaybePopOne(pCtx);
5827}
5828
5829
5830DECL_NO_INLINE(static, void)
5831iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5832{
5833 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5834 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5835
5836 if (pCtx->fpu.FCW & X86_FCW_IM)
5837 {
5838 /* Masked underflow - Push QNaN. */
5839 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
5840 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5841 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5842 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5843 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5844 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5845 iemFpuRotateStackPush(pCtx);
5846 }
5847 else
5848 {
5849 /* Exception pending - don't change TOP or the register stack. */
5850 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5851 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5852 }
5853}
5854
5855
5856DECL_NO_INLINE(static, void)
5857iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5858{
5859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5860 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5861
5862 if (pCtx->fpu.FCW & X86_FCW_IM)
5863 {
5864 /* Masked underflow - Push QNaN. */
5865 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
5866 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5867 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5868 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5869 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5870 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
5871 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5872 iemFpuRotateStackPush(pCtx);
5873 }
5874 else
5875 {
5876 /* Exception pending - don't change TOP or the register stack. */
5877 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5878 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5879 }
5880}
5881
5882
5883/**
5884 * Worker routine for raising an FPU stack overflow exception on a push.
5885 *
5886 * @param pIemCpu The IEM per CPU data.
5887 * @param pCtx The CPU context.
5888 */
5889static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
5890{
5891 if (pCtx->fpu.FCW & X86_FCW_IM)
5892 {
5893 /* Masked overflow. */
5894 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
5895 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5896 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5897 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5898 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5899 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5900 iemFpuRotateStackPush(pCtx);
5901 }
5902 else
5903 {
5904 /* Exception pending - don't change TOP or the register stack. */
5905 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5906 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5907 }
5908}
5909
5910
5911/**
5912 * Raises a FPU stack overflow exception on a push.
5913 *
5914 * @param pIemCpu The IEM per CPU data.
5915 */
5916DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5917{
5918 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5919 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5920 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
5921}
5922
5923
5924/**
5925 * Raises a FPU stack overflow exception on a push with a memory operand.
5926 *
5927 * @param pIemCpu The IEM per CPU data.
5928 * @param iEffSeg The effective memory operand selector register.
5929 * @param GCPtrEff The effective memory operand offset.
5930 */
5931DECL_NO_INLINE(static, void)
5932iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5933{
5934 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5935 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5936 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5937 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
5938}
5939
5940
5941static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5942{
5943 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5944 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5945 if (pCtx->fpu.FTW & RT_BIT(iReg))
5946 return VINF_SUCCESS;
5947 return VERR_NOT_FOUND;
5948}
5949
5950
5951static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5952{
5953 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5954 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5955 if (pCtx->fpu.FTW & RT_BIT(iReg))
5956 {
5957 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
5958 return VINF_SUCCESS;
5959 }
5960 return VERR_NOT_FOUND;
5961}
5962
5963
5964static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5965 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5966{
5967 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5968 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5969 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5970 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5971 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5972 {
5973 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
5974 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
5975 return VINF_SUCCESS;
5976 }
5977 return VERR_NOT_FOUND;
5978}
5979
5980
5981static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5982{
5983 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5984 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5985 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5986 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5987 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5988 {
5989 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
5990 return VINF_SUCCESS;
5991 }
5992 return VERR_NOT_FOUND;
5993}
5994
5995
5996/**
5997 * Updates the FPU exception status after FCW is changed.
5998 *
5999 * @param pCtx The CPU context.
6000 */
6001static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
6002{
6003 uint16_t u16Fsw = pCtx->fpu.FSW;
6004 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
6005 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6006 else
6007 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6008 pCtx->fpu.FSW = u16Fsw;
6009}
6010
6011
6012/**
6013 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6014 *
6015 * @returns The full FTW.
6016 * @param pCtx The CPU state.
6017 */
6018static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
6019{
6020 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
6021 uint16_t u16Ftw = 0;
6022 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
6023 for (unsigned iSt = 0; iSt < 8; iSt++)
6024 {
6025 unsigned const iReg = (iSt + iTop) & 7;
6026 if (!(u8Ftw & RT_BIT(iReg)))
6027 u16Ftw |= 3 << (iReg * 2); /* empty */
6028 else
6029 {
6030 uint16_t uTag;
6031 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
6032 if (pr80Reg->s.uExponent == 0x7fff)
6033 uTag = 2; /* Exponent is all 1's => Special. */
6034 else if (pr80Reg->s.uExponent == 0x0000)
6035 {
6036 if (pr80Reg->s.u64Mantissa == 0x0000)
6037 uTag = 1; /* All bits are zero => Zero. */
6038 else
6039 uTag = 2; /* Must be special. */
6040 }
6041 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6042 uTag = 0; /* Valid. */
6043 else
6044 uTag = 2; /* Must be special. */
6045
6046 u16Ftw |= uTag << (iReg * 2);
6047 }
6048 }
6049
6050 return u16Ftw;
6051}
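
/*
 * Illustrative sketch (editorial addition, not IEM code): the per-register tag
 * classification performed by the loop above, written as a standalone helper.
 * Tag values: 0 = valid, 1 = zero, 2 = special (NaN, infinity, denormal,
 * unnormal), 3 = empty.
 */
#include <stdint.h>
#include <stdbool.h>

static unsigned demoFpuTagForReg(bool fEmpty, uint16_t uExponent, uint64_t u64Mantissa)
{
    if (fEmpty)
        return 3;
    if (uExponent == 0x7fff)
        return 2;                                     /* All-ones exponent: NaN or infinity. */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 : 2;              /* Zero, or a (pseudo-)denormal. */
    return (u64Mantissa & (UINT64_C(1) << 63)) ? 0 : 2; /* J bit set => valid, clear => unnormal. */
}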
6052
6053
6054/**
6055 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6056 *
6057 * @returns The compressed FTW.
6058 * @param u16FullFtw The full FTW to convert.
6059 */
6060static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6061{
6062 uint8_t u8Ftw = 0;
6063 for (unsigned i = 0; i < 8; i++)
6064 {
6065 if ((u16FullFtw & 3) != 3 /*empty*/)
6066 u8Ftw |= RT_BIT(i);
6067 u16FullFtw >>= 2;
6068 }
6069
6070 return u8Ftw;
6071}
6072
6073/** @} */
6074
6075
6076/** @name Memory access.
6077 *
6078 * @{
6079 */
6080
6081
6082/**
6083 * Updates the IEMCPU::cbWritten counter if applicable.
6084 *
6085 * @param pIemCpu The IEM per CPU data.
6086 * @param fAccess The access being accounted for.
6087 * @param cbMem The access size.
6088 */
6089DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6090{
6091 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6092 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6093 pIemCpu->cbWritten += (uint32_t)cbMem;
6094}
6095
6096
6097/**
6098 * Checks if the given segment can be written to, raising the appropriate
6099 * exception if not.
6100 *
6101 * @returns VBox strict status code.
6102 *
6103 * @param pIemCpu The IEM per CPU data.
6104 * @param pHid Pointer to the hidden register.
6105 * @param iSegReg The register number.
6106 * @param pu64BaseAddr Where to return the base address to use for the
6107 * segment. (In 64-bit code it may differ from the
6108 * base in the hidden segment.)
6109 */
6110static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6111{
6112 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6113 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6114 else
6115 {
6116 if (!pHid->Attr.n.u1Present)
6117 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6118
6119 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6120 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6121 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6122 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6123 *pu64BaseAddr = pHid->u64Base;
6124 }
6125 return VINF_SUCCESS;
6126}
6127
6128
6129/**
6130 * Checks if the given segment can be read from, raising the appropriate
6131 * exception if not.
6132 *
6133 * @returns VBox strict status code.
6134 *
6135 * @param pIemCpu The IEM per CPU data.
6136 * @param pHid Pointer to the hidden register.
6137 * @param iSegReg The register number.
6138 * @param pu64BaseAddr Where to return the base address to use for the
6139 * segment. (In 64-bit code it may differ from the
6140 * base in the hidden segment.)
6141 */
6142static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6143{
6144 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6145 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6146 else
6147 {
6148 if (!pHid->Attr.n.u1Present)
6149 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6150
6151 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6152 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6153 *pu64BaseAddr = pHid->u64Base;
6154 }
6155 return VINF_SUCCESS;
6156}
6157
6158
6159/**
6160 * Applies the segment limit, base and attributes.
6161 *
6162 * This may raise a \#GP or \#SS.
6163 *
6164 * @returns VBox strict status code.
6165 *
6166 * @param pIemCpu The IEM per CPU data.
6167 * @param fAccess The kind of access which is being performed.
6168 * @param iSegReg The index of the segment register to apply.
6169 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6170 * TSS, ++).
6171 * @param pGCPtrMem Pointer to the guest memory address to apply
6172 * segmentation to. Input and output parameter.
6173 */
6174static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
6175 size_t cbMem, PRTGCPTR pGCPtrMem)
6176{
6177 if (iSegReg == UINT8_MAX)
6178 return VINF_SUCCESS;
6179
6180 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6181 switch (pIemCpu->enmCpuMode)
6182 {
6183 case IEMMODE_16BIT:
6184 case IEMMODE_32BIT:
6185 {
6186 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6187 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6188
6189 Assert(pSel->Attr.n.u1Present);
6190 Assert(pSel->Attr.n.u1DescType);
6191 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6192 {
6193 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6194 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6195 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6196
6197 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6198 {
6199 /** @todo CPL check. */
6200 }
6201
6202 /*
6203 * There are two kinds of data selectors, normal and expand down.
6204 */
6205 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6206 {
6207 if ( GCPtrFirst32 > pSel->u32Limit
6208 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6209 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6210 }
6211 else
6212 {
6213 /*
6214 * The upper boundary is defined by the B bit, not the G bit!
6215 */
6216 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6217 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6218 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6219 }
6220 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6221 }
6222 else
6223 {
6224
6225 /*
6226 * A code selector can usually be used to read through; writing is
6227 * only permitted in real and V8086 mode.
6228 */
6229 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6230 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6231 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6232 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6233 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6234
6235 if ( GCPtrFirst32 > pSel->u32Limit
6236 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6237 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6238
6239 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6240 {
6241 /** @todo CPL check. */
6242 }
6243
6244 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6245 }
6246 return VINF_SUCCESS;
6247 }
6248
6249 case IEMMODE_64BIT:
6250 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6251 *pGCPtrMem += pSel->u64Base;
6252 return VINF_SUCCESS;
6253
6254 default:
6255 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
6256 }
6257}
6258
6259
6260/**
6261 * Translates a virtual address to a physical address and checks if we
6262 * can access the page as specified.
6263 *
6264 * @param pIemCpu The IEM per CPU data.
6265 * @param GCPtrMem The virtual address.
6266 * @param fAccess The intended access.
6267 * @param pGCPhysMem Where to return the physical address.
6268 */
6269static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
6270 PRTGCPHYS pGCPhysMem)
6271{
6272 /** @todo Need a different PGM interface here. We're currently using
6273 * generic / REM interfaces. This won't cut it for R0 & RC. */
6274 RTGCPHYS GCPhys;
6275 uint64_t fFlags;
6276 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6277 if (RT_FAILURE(rc))
6278 {
6279 /** @todo Check unassigned memory in unpaged mode. */
6280 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6281 *pGCPhysMem = NIL_RTGCPHYS;
6282 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6283 }
6284
6285 /* If the page is writable and does not have the no-exec bit set, all
6286 access is allowed. Otherwise we'll have to check more carefully... */
6287 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6288 {
6289 /* Write to read only memory? */
6290 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6291 && !(fFlags & X86_PTE_RW)
6292 && ( pIemCpu->uCpl != 0
6293 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6294 {
6295 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6296 *pGCPhysMem = NIL_RTGCPHYS;
6297 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6298 }
6299
6300 /* Kernel memory accessed by userland? */
6301 if ( !(fFlags & X86_PTE_US)
6302 && pIemCpu->uCpl == 3
6303 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6304 {
6305 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6306 *pGCPhysMem = NIL_RTGCPHYS;
6307 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6308 }
6309
6310 /* Executing non-executable memory? */
6311 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6312 && (fFlags & X86_PTE_PAE_NX)
6313 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6314 {
6315 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6316 *pGCPhysMem = NIL_RTGCPHYS;
6317 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6318 VERR_ACCESS_DENIED);
6319 }
6320 }
6321
6322 /*
6323 * Set the dirty / access flags.
6324 * ASSUMES this is set when the address is translated rather than on commit...
6325 */
6326 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6327 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6328 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6329 {
6330 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6331 AssertRC(rc2);
6332 }
6333
6334 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6335 *pGCPhysMem = GCPhys;
6336 return VINF_SUCCESS;
6337}
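
/*
 * Illustrative sketch (editorial addition, not IEM code): the write-protect
 * rule applied above in condensed form - a write to a read-only page faults
 * when executing above ring 0, and in ring 0 only when CR0.WP is set.
 */
#include <stdbool.h>

static bool demoWriteToReadOnlyPageFaults(bool fPteWritable, unsigned uCpl, bool fCr0Wp)
{
    return !fPteWritable && (uCpl != 0 || fCr0Wp);
}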
6338
6339
6340
6341/**
6342 * Maps a physical page.
6343 *
6344 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6345 * @param pIemCpu The IEM per CPU data.
6346 * @param GCPhysMem The physical address.
6347 * @param fAccess The intended access.
6348 * @param ppvMem Where to return the mapping address.
6349 * @param pLock The PGM lock.
6350 */
6351static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6352{
6353#ifdef IEM_VERIFICATION_MODE_FULL
6354 /* Force the alternative path so we can ignore writes. */
6355 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6356 {
6357 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6358 {
6359 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6360 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6361 if (RT_FAILURE(rc2))
6362 pIemCpu->fProblematicMemory = true;
6363 }
6364 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6365 }
6366#endif
6367#ifdef IEM_LOG_MEMORY_WRITES
6368 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6369 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6370#endif
6371#ifdef IEM_VERIFICATION_MODE_MINIMAL
6372 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6373#endif
6374
6375 /** @todo This API may require some improving later. A private deal with PGM
6376 * regarding locking and unlocking needs to be struck. A couple of TLBs
6377 * living in PGM, but with publicly accessible inlined access methods
6378 * could perhaps be an even better solution. */
6379 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6380 GCPhysMem,
6381 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6382 pIemCpu->fBypassHandlers,
6383 ppvMem,
6384 pLock);
6385 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6386 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6387
6388#ifdef IEM_VERIFICATION_MODE_FULL
6389 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6390 pIemCpu->fProblematicMemory = true;
6391#endif
6392 return rc;
6393}
6394
6395
6396/**
6397 * Unmap a page previously mapped by iemMemPageMap.
6398 *
6399 * @param pIemCpu The IEM per CPU data.
6400 * @param GCPhysMem The physical address.
6401 * @param fAccess The intended access.
6402 * @param pvMem What iemMemPageMap returned.
6403 * @param pLock The PGM lock.
6404 */
6405DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6406{
6407 NOREF(pIemCpu);
6408 NOREF(GCPhysMem);
6409 NOREF(fAccess);
6410 NOREF(pvMem);
6411 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6412}
6413
6414
6415/**
6416 * Looks up a memory mapping entry.
6417 *
6418 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6419 * @param pIemCpu The IEM per CPU data.
6420 * @param pvMem The memory address.
6421 * @param fAccess The access to look up.
6422 */
6423DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6424{
6425 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6426 if ( pIemCpu->aMemMappings[0].pv == pvMem
6427 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6428 return 0;
6429 if ( pIemCpu->aMemMappings[1].pv == pvMem
6430 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6431 return 1;
6432 if ( pIemCpu->aMemMappings[2].pv == pvMem
6433 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6434 return 2;
6435 return VERR_NOT_FOUND;
6436}
6437
6438
6439/**
6440 * Finds a free memmap entry when using iNextMapping doesn't work.
6441 *
6442 * @returns Memory mapping index, 1024 on failure.
6443 * @param pIemCpu The IEM per CPU data.
6444 */
6445static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6446{
6447 /*
6448 * The easy case.
6449 */
6450 if (pIemCpu->cActiveMappings == 0)
6451 {
6452 pIemCpu->iNextMapping = 1;
6453 return 0;
6454 }
6455
6456 /* There should be enough mappings for all instructions. */
6457 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6458
6459 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6460 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6461 return i;
6462
6463 AssertFailedReturn(1024);
6464}
6465
6466
6467/**
6468 * Commits a bounce buffer that needs writing back and unmaps it.
6469 *
6470 * @returns Strict VBox status code.
6471 * @param pIemCpu The IEM per CPU data.
6472 * @param iMemMap The index of the buffer to commit.
6473 */
6474static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6475{
6476 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6477 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6478
6479 /*
6480 * Do the writing.
6481 */
6482 int rc;
6483#ifndef IEM_VERIFICATION_MODE_MINIMAL
6484 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6485 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6486 {
6487 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6488 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6489 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6490 if (!pIemCpu->fBypassHandlers)
6491 {
6492 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6493 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6494 pbBuf,
6495 cbFirst);
6496 if (cbSecond && rc == VINF_SUCCESS)
6497 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6498 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6499 pbBuf + cbFirst,
6500 cbSecond);
6501 }
6502 else
6503 {
6504 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6505 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6506 pbBuf,
6507 cbFirst);
6508 if (cbSecond && rc == VINF_SUCCESS)
6509 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6510 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6511 pbBuf + cbFirst,
6512 cbSecond);
6513 }
6514 if (rc != VINF_SUCCESS)
6515 {
6516 /** @todo status code handling */
6517 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6518 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
6519 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6520 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6521 }
6522 }
6523 else
6524#endif
6525 rc = VINF_SUCCESS;
6526
6527#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6528 /*
6529 * Record the write(s).
6530 */
6531 if (!pIemCpu->fNoRem)
6532 {
6533 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6534 if (pEvtRec)
6535 {
6536 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6537 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6538 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6539 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6540 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6541 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6542 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6543 }
6544 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6545 {
6546 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6547 if (pEvtRec)
6548 {
6549 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6550 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6551 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6552 memcpy(pEvtRec->u.RamWrite.ab,
6553 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6554 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6555 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6556 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6557 }
6558 }
6559 }
6560#endif
6561#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6562 if (rc == VINF_SUCCESS)
6563 {
6564 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6565 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6566 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6567 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6568 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6569 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6570
6571 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6572 g_cbIemWrote = cbWrote;
6573 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6574 }
6575#endif
6576
6577 /*
6578 * Free the mapping entry.
6579 */
6580 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6581 Assert(pIemCpu->cActiveMappings != 0);
6582 pIemCpu->cActiveMappings--;
6583 return rc;
6584}
6585
6586
6587/**
6588 * iemMemMap worker that deals with a request crossing pages.
6589 */
6590static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
6591 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6592{
6593 /*
6594 * Do the address translations.
6595 */
6596 RTGCPHYS GCPhysFirst;
6597 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6598 if (rcStrict != VINF_SUCCESS)
6599 return rcStrict;
6600
6601/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6602 * last byte. */
6603 RTGCPHYS GCPhysSecond;
6604 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6605 if (rcStrict != VINF_SUCCESS)
6606 return rcStrict;
6607 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6608
6609#ifdef IEM_VERIFICATION_MODE_FULL
6610 /*
6611 * Detect problematic memory when verifying so we can select
6612 * the right execution engine. (TLB: Redo this.)
6613 */
6614 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6615 {
6616 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysFirst,
6617 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6618 if (RT_SUCCESS(rc2))
6619 rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysSecond,
6620 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6621 if (RT_FAILURE(rc2))
6622 pIemCpu->fProblematicMemory = true;
6623 }
6624#endif
6625
6626
6627 /*
6628 * Read in the current memory content if it's a read, execute or partial
6629 * write access.
6630 */
6631 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6632 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6633 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6634
6635 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6636 {
6637 int rc;
6638 if (!pIemCpu->fBypassHandlers)
6639 {
6640 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
6641 if (rc != VINF_SUCCESS)
6642 {
6643 /** @todo status code handling */
6644 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6645 return rc;
6646 }
6647 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
6648 if (rc != VINF_SUCCESS)
6649 {
6650 /** @todo status code handling */
6651 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6652 return rc;
6653 }
6654 }
6655 else
6656 {
6657 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
6658 if (rc != VINF_SUCCESS)
6659 {
6660 /** @todo status code handling */
6661 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6662 return rc;
6663 }
6664 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6665 if (rc != VINF_SUCCESS)
6666 {
6667 /** @todo status code handling */
6668 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6669 return rc;
6670 }
6671 }
6672
6673#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6674 if ( !pIemCpu->fNoRem
6675 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6676 {
6677 /*
6678 * Record the reads.
6679 */
6680 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6681 if (pEvtRec)
6682 {
6683 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6684 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6685 pEvtRec->u.RamRead.cb = cbFirstPage;
6686 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6687 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6688 }
6689 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6690 if (pEvtRec)
6691 {
6692 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6693 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6694 pEvtRec->u.RamRead.cb = cbSecondPage;
6695 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6696 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6697 }
6698 }
6699#endif
6700 }
6701#ifdef VBOX_STRICT
6702 else
6703 memset(pbBuf, 0xcc, cbMem);
6704#endif
6705#ifdef VBOX_STRICT
6706 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6707 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6708#endif
6709
6710 /*
6711 * Commit the bounce buffer entry.
6712 */
6713 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6714 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6715 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6716 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6717 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6718 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6719 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6720 pIemCpu->iNextMapping = iMemMap + 1;
6721 pIemCpu->cActiveMappings++;
6722
6723 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6724 *ppvMem = pbBuf;
6725 return VINF_SUCCESS;
6726}
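
/*
 * Illustrative sketch (not part of the original source): how the cross-page
 * split above works out for a 4 byte access that ends 2 bytes into the next
 * page, assuming PAGE_SIZE == 0x1000.
 *
 *   GCPtrFirst   = 0x00010ffe                                   (last 2 bytes of the first page)
 *   cbMem        = 4
 *   cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffe = 2
 *   cbSecondPage = cbMem - cbFirstPage                          = 4 - 2          = 2
 *
 * The bounce buffer thus receives 2 bytes from the end of the first page and
 * 2 bytes from the start of the second, and the commit path later writes them
 * back to GCPhysFirst and GCPhysSecond respectively.
 */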
6727
6728
6729/**
6730 * iemMemMap worker that deals with iemMemPageMap failures.
6731 */
6732static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6733 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6734{
6735 /*
6736 * Filter out conditions we can handle and the ones which shouldn't happen.
6737 */
6738 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6739 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6740 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6741 {
6742 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
6743 return rcMap;
6744 }
6745 pIemCpu->cPotentialExits++;
6746
6747 /*
6748 * Read in the current memory content if it's a read, execute or partial
6749 * write access.
6750 */
6751 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6752 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6753 {
6754 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6755 memset(pbBuf, 0xff, cbMem);
6756 else
6757 {
6758 int rc;
6759 if (!pIemCpu->fBypassHandlers)
6760 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
6761 else
6762 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6763 if (rc != VINF_SUCCESS)
6764 {
6765 /** @todo status code handling */
6766 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6767 pIemCpu->fBypassHandlers ? "PGMPhysRead" : "PGMPhysSimpleReadGCPhys", GCPhysFirst, rc));
6768 return rc;
6769 }
6770 }
6771
6772#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6773 if ( !pIemCpu->fNoRem
6774 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6775 {
6776 /*
6777 * Record the read.
6778 */
6779 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6780 if (pEvtRec)
6781 {
6782 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6783 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6784 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6785 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6786 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6787 }
6788 }
6789#endif
6790 }
6791#ifdef VBOX_STRICT
6792 else
6793 memset(pbBuf, 0xcc, cbMem);
6794#endif
6795#ifdef VBOX_STRICT
6796 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6797 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6798#endif
6799
6800 /*
6801 * Commit the bounce buffer entry.
6802 */
6803 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6804 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6805 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6806 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6807 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6808 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6809 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6810 pIemCpu->iNextMapping = iMemMap + 1;
6811 pIemCpu->cActiveMappings++;
6812
6813 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6814 *ppvMem = pbBuf;
6815 return VINF_SUCCESS;
6816}
6817
6818
6819
6820/**
6821 * Maps the specified guest memory for the given kind of access.
6822 *
6823 * This may be using bounce buffering of the memory if it's crossing a page
6824 * boundary or if there is an access handler installed for any of it. Because
6825 * of lock prefix guarantees, we're in for some extra clutter when this
6826 * happens.
6827 *
6828 * This may raise a \#GP, \#SS, \#PF or \#AC.
6829 *
6830 * @returns VBox strict status code.
6831 *
6832 * @param pIemCpu The IEM per CPU data.
6833 * @param ppvMem Where to return the pointer to the mapped
6834 * memory.
6835 * @param cbMem The number of bytes to map. This is usually 1,
6836 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6837 * string operations it can be up to a page.
6838 * @param iSegReg The index of the segment register to use for
6839 * this access. The base and limits are checked.
6840 * Use UINT8_MAX to indicate that no segmentation
6841 * is required (for IDT, GDT and LDT accesses).
6842 * @param GCPtrMem The address of the guest memory.
6843 * @param a_fAccess How the memory is being accessed. The
6844 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6845 * how to map the memory, while the
6846 * IEM_ACCESS_WHAT_XXX bit is used when raising
6847 * exceptions.
6848 */
6849static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6850{
6851 /*
6852 * Check the input and figure out which mapping entry to use.
6853 */
6854 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6855 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6856
6857 unsigned iMemMap = pIemCpu->iNextMapping;
6858 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6859 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6860 {
6861 iMemMap = iemMemMapFindFree(pIemCpu);
6862 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
6863 }
6864
6865 /*
6866 * Map the memory, checking that we can actually access it. If something
6867 * slightly complicated happens, fall back on bounce buffering.
6868 */
6869 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6870 if (rcStrict != VINF_SUCCESS)
6871 return rcStrict;
6872
6873 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6874 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6875
6876 RTGCPHYS GCPhysFirst;
6877 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6878 if (rcStrict != VINF_SUCCESS)
6879 return rcStrict;
6880
6881 void *pvMem;
6882 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6883 if (rcStrict != VINF_SUCCESS)
6884 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6885
6886 /*
6887 * Fill in the mapping table entry.
6888 */
6889 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
6890 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
6891 pIemCpu->iNextMapping = iMemMap + 1;
6892 pIemCpu->cActiveMappings++;
6893
6894 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6895 *ppvMem = pvMem;
6896 return VINF_SUCCESS;
6897}
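
/*
 * Minimal usage sketch (not part of the original source) of the map /
 * commit-and-unmap pattern described above; it mirrors the fetch and store
 * helpers that follow.  Variable names are illustrative only.
 *
 *   uint32_t    *pu32Dst;
 *   VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                     X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *   if (rcStrict == VINF_SUCCESS)
 *   {
 *       *pu32Dst = u32Value;   // direct mapping or bounce buffer, the caller can't tell
 *       rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *   }
 *   return rcStrict;           // failures (#GP/#SS/#PF etc.) come back as a strict status code
 */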
6898
6899
6900/**
6901 * Commits the guest memory if bounce buffered and unmaps it.
6902 *
6903 * @returns Strict VBox status code.
6904 * @param pIemCpu The IEM per CPU data.
6905 * @param pvMem The mapping.
6906 * @param fAccess The kind of access.
6907 */
6908static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6909{
6910 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
6911 AssertReturn(iMemMap >= 0, iMemMap);
6912
6913 /* If it's bounce buffered, we may need to write back the buffer. */
6914 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6915 {
6916 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6917 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
6918 }
6919 /* Otherwise unlock it. */
6920 else
6921 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6922
6923 /* Free the entry. */
6924 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6925 Assert(pIemCpu->cActiveMappings != 0);
6926 pIemCpu->cActiveMappings--;
6927 return VINF_SUCCESS;
6928}
6929
6930
6931/**
6932 * Rolls back mappings, releasing page locks and such.
6933 *
6934 * The caller shall only call this after checking cActiveMappings.
6935 *
6937 * @param pIemCpu The IEM per CPU data.
6938 */
6939static void iemMemRollback(PIEMCPU pIemCpu)
6940{
6941 Assert(pIemCpu->cActiveMappings > 0);
6942
6943 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
6944 while (iMemMap-- > 0)
6945 {
6946 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
6947 if (fAccess != IEM_ACCESS_INVALID)
6948 {
6949 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6950 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
6951 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6952 Assert(pIemCpu->cActiveMappings > 0);
6953 pIemCpu->cActiveMappings--;
6954 }
6955 }
6956}
6957
6958
6959/**
6960 * Fetches a data byte.
6961 *
6962 * @returns Strict VBox status code.
6963 * @param pIemCpu The IEM per CPU data.
6964 * @param pu8Dst Where to return the byte.
6965 * @param iSegReg The index of the segment register to use for
6966 * this access. The base and limits are checked.
6967 * @param GCPtrMem The address of the guest memory.
6968 */
6969static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6970{
6971 /* The lazy approach for now... */
6972 uint8_t const *pu8Src;
6973 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6974 if (rc == VINF_SUCCESS)
6975 {
6976 *pu8Dst = *pu8Src;
6977 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6978 }
6979 return rc;
6980}
6981
6982
6983/**
6984 * Fetches a data word.
6985 *
6986 * @returns Strict VBox status code.
6987 * @param pIemCpu The IEM per CPU data.
6988 * @param pu16Dst Where to return the word.
6989 * @param iSegReg The index of the segment register to use for
6990 * this access. The base and limits are checked.
6991 * @param GCPtrMem The address of the guest memory.
6992 */
6993static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6994{
6995 /* The lazy approach for now... */
6996 uint16_t const *pu16Src;
6997 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6998 if (rc == VINF_SUCCESS)
6999 {
7000 *pu16Dst = *pu16Src;
7001 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7002 }
7003 return rc;
7004}
7005
7006
7007/**
7008 * Fetches a data dword.
7009 *
7010 * @returns Strict VBox status code.
7011 * @param pIemCpu The IEM per CPU data.
7012 * @param pu32Dst Where to return the dword.
7013 * @param iSegReg The index of the segment register to use for
7014 * this access. The base and limits are checked.
7015 * @param GCPtrMem The address of the guest memory.
7016 */
7017static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7018{
7019 /* The lazy approach for now... */
7020 uint32_t const *pu32Src;
7021 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7022 if (rc == VINF_SUCCESS)
7023 {
7024 *pu32Dst = *pu32Src;
7025 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7026 }
7027 return rc;
7028}
7029
7030
7031#ifdef SOME_UNUSED_FUNCTION
7032/**
7033 * Fetches a data dword and sign extends it to a qword.
7034 *
7035 * @returns Strict VBox status code.
7036 * @param pIemCpu The IEM per CPU data.
7037 * @param pu64Dst Where to return the sign extended value.
7038 * @param iSegReg The index of the segment register to use for
7039 * this access. The base and limits are checked.
7040 * @param GCPtrMem The address of the guest memory.
7041 */
7042static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7043{
7044 /* The lazy approach for now... */
7045 int32_t const *pi32Src;
7046 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7047 if (rc == VINF_SUCCESS)
7048 {
7049 *pu64Dst = *pi32Src;
7050 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7051 }
7052#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7053 else
7054 *pu64Dst = 0;
7055#endif
7056 return rc;
7057}
7058#endif
7059
7060
7061/**
7062 * Fetches a data qword.
7063 *
7064 * @returns Strict VBox status code.
7065 * @param pIemCpu The IEM per CPU data.
7066 * @param pu64Dst Where to return the qword.
7067 * @param iSegReg The index of the segment register to use for
7068 * this access. The base and limits are checked.
7069 * @param GCPtrMem The address of the guest memory.
7070 */
7071static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7072{
7073 /* The lazy approach for now... */
7074 uint64_t const *pu64Src;
7075 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7076 if (rc == VINF_SUCCESS)
7077 {
7078 *pu64Dst = *pu64Src;
7079 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7080 }
7081 return rc;
7082}
7083
7084
7085/**
7086 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7087 *
7088 * @returns Strict VBox status code.
7089 * @param pIemCpu The IEM per CPU data.
7090 * @param pu64Dst Where to return the qword.
7091 * @param iSegReg The index of the segment register to use for
7092 * this access. The base and limits are checked.
7093 * @param GCPtrMem The address of the guest memory.
7094 */
7095static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7096{
7097 /* The lazy approach for now... */
7098 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7099 if (RT_UNLIKELY(GCPtrMem & 15))
7100 return iemRaiseGeneralProtectionFault0(pIemCpu);
7101
7102 uint64_t const *pu64Src;
7103 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7104 if (rc == VINF_SUCCESS)
7105 {
7106 *pu64Dst = *pu64Src;
7107 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7108 }
7109 return rc;
7110}
7111
7112
7113/**
7114 * Fetches a data tword.
7115 *
7116 * @returns Strict VBox status code.
7117 * @param pIemCpu The IEM per CPU data.
7118 * @param pr80Dst Where to return the tword.
7119 * @param iSegReg The index of the segment register to use for
7120 * this access. The base and limits are checked.
7121 * @param GCPtrMem The address of the guest memory.
7122 */
7123static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7124{
7125 /* The lazy approach for now... */
7126 PCRTFLOAT80U pr80Src;
7127 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7128 if (rc == VINF_SUCCESS)
7129 {
7130 *pr80Dst = *pr80Src;
7131 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7132 }
7133 return rc;
7134}
7135
7136
7137/**
7138 * Fetches a data dqword (double qword), generally SSE related.
7139 *
7140 * @returns Strict VBox status code.
7141 * @param pIemCpu The IEM per CPU data.
7142 * @param pu128Dst Where to return the dqword.
7143 * @param iSegReg The index of the segment register to use for
7144 * this access. The base and limits are checked.
7145 * @param GCPtrMem The address of the guest memory.
7146 */
7147static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7148{
7149 /* The lazy approach for now... */
7150 uint128_t const *pu128Src;
7151 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7152 if (rc == VINF_SUCCESS)
7153 {
7154 *pu128Dst = *pu128Src;
7155 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7156 }
7157 return rc;
7158}
7159
7160
7161/**
7162 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7163 * related.
7164 *
7165 * Raises \#GP(0) if not aligned.
7166 *
7167 * @returns Strict VBox status code.
7168 * @param pIemCpu The IEM per CPU data.
7169 * @param pu128Dst Where to return the dqword.
7170 * @param iSegReg The index of the segment register to use for
7171 * this access. The base and limits are checked.
7172 * @param GCPtrMem The address of the guest memory.
7173 */
7174static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7175{
7176 /* The lazy approach for now... */
7177 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7178 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7179 return iemRaiseGeneralProtectionFault0(pIemCpu);
7180
7181 uint128_t const *pu128Src;
7182 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7183 if (rc == VINF_SUCCESS)
7184 {
7185 *pu128Dst = *pu128Src;
7186 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7187 }
7188 return rc;
7189}
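
/*
 * Worked example (not part of the original source) of the 16 byte alignment
 * check used by the SSE-aligned fetch and store helpers:
 *
 *   GCPtrMem = 0x00001230  ->  (GCPtrMem & 15) == 0  ->  aligned, the access proceeds
 *   GCPtrMem = 0x00001238  ->  (GCPtrMem & 15) == 8  ->  misaligned, #GP(0) is raised
 *                                                        unless MXCSR.MM is set
 */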
7190
7191
7192
7193
7194/**
7195 * Fetches a descriptor register (lgdt, lidt).
7196 *
7197 * @returns Strict VBox status code.
7198 * @param pIemCpu The IEM per CPU data.
7199 * @param pcbLimit Where to return the limit.
7200 * @param pGCPtrBase Where to return the base.
7201 * @param iSegReg The index of the segment register to use for
7202 * this access. The base and limits are checked.
7203 * @param GCPtrMem The address of the guest memory.
7204 * @param enmOpSize The effective operand size.
7205 */
7206static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
7207 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7208{
7209 uint8_t const *pu8Src;
7210 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7211 (void **)&pu8Src,
7212 enmOpSize == IEMMODE_64BIT
7213 ? 2 + 8
7214 : enmOpSize == IEMMODE_32BIT
7215 ? 2 + 4
7216 : 2 + 3,
7217 iSegReg,
7218 GCPtrMem,
7219 IEM_ACCESS_DATA_R);
7220 if (rcStrict == VINF_SUCCESS)
7221 {
7222 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7223 switch (enmOpSize)
7224 {
7225 case IEMMODE_16BIT:
7226 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7227 break;
7228 case IEMMODE_32BIT:
7229 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7230 break;
7231 case IEMMODE_64BIT:
7232 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7233 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7234 break;
7235
7236 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7237 }
7238 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7239 }
7240 return rcStrict;
7241}
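
/*
 * Memory layout sketch (not part of the original source) of the descriptor
 * table register image fetched above, as implied by the byte indexes used:
 *
 *   offset 0..1 : 16-bit limit
 *   offset 2..  : base address
 *                   16-bit operand size: 3 base bytes, the high byte is forced to 0
 *                   32-bit operand size: 4 base bytes
 *                   64-bit operand size: 8 base bytes
 */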
7242
7243
7244
7245/**
7246 * Stores a data byte.
7247 *
7248 * @returns Strict VBox status code.
7249 * @param pIemCpu The IEM per CPU data.
7250 * @param iSegReg The index of the segment register to use for
7251 * this access. The base and limits are checked.
7252 * @param GCPtrMem The address of the guest memory.
7253 * @param u8Value The value to store.
7254 */
7255static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7256{
7257 /* The lazy approach for now... */
7258 uint8_t *pu8Dst;
7259 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7260 if (rc == VINF_SUCCESS)
7261 {
7262 *pu8Dst = u8Value;
7263 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7264 }
7265 return rc;
7266}
7267
7268
7269/**
7270 * Stores a data word.
7271 *
7272 * @returns Strict VBox status code.
7273 * @param pIemCpu The IEM per CPU data.
7274 * @param iSegReg The index of the segment register to use for
7275 * this access. The base and limits are checked.
7276 * @param GCPtrMem The address of the guest memory.
7277 * @param u16Value The value to store.
7278 */
7279static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7280{
7281 /* The lazy approach for now... */
7282 uint16_t *pu16Dst;
7283 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7284 if (rc == VINF_SUCCESS)
7285 {
7286 *pu16Dst = u16Value;
7287 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7288 }
7289 return rc;
7290}
7291
7292
7293/**
7294 * Stores a data dword.
7295 *
7296 * @returns Strict VBox status code.
7297 * @param pIemCpu The IEM per CPU data.
7298 * @param iSegReg The index of the segment register to use for
7299 * this access. The base and limits are checked.
7300 * @param GCPtrMem The address of the guest memory.
7301 * @param u32Value The value to store.
7302 */
7303static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7304{
7305 /* The lazy approach for now... */
7306 uint32_t *pu32Dst;
7307 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7308 if (rc == VINF_SUCCESS)
7309 {
7310 *pu32Dst = u32Value;
7311 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7312 }
7313 return rc;
7314}
7315
7316
7317/**
7318 * Stores a data qword.
7319 *
7320 * @returns Strict VBox status code.
7321 * @param pIemCpu The IEM per CPU data.
7322 * @param iSegReg The index of the segment register to use for
7323 * this access. The base and limits are checked.
7324 * @param GCPtrMem The address of the guest memory.
7325 * @param u64Value The value to store.
7326 */
7327static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7328{
7329 /* The lazy approach for now... */
7330 uint64_t *pu64Dst;
7331 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7332 if (rc == VINF_SUCCESS)
7333 {
7334 *pu64Dst = u64Value;
7335 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7336 }
7337 return rc;
7338}
7339
7340
7341/**
7342 * Stores a data dqword.
7343 *
7344 * @returns Strict VBox status code.
7345 * @param pIemCpu The IEM per CPU data.
7346 * @param iSegReg The index of the segment register to use for
7347 * this access. The base and limits are checked.
7348 * @param GCPtrMem The address of the guest memory.
7349 * @param u128Value The value to store.
7350 */
7351static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7352{
7353 /* The lazy approach for now... */
7354 uint128_t *pu128Dst;
7355 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7356 if (rc == VINF_SUCCESS)
7357 {
7358 *pu128Dst = u128Value;
7359 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7360 }
7361 return rc;
7362}
7363
7364
7365/**
7366 * Stores a data dqword, SSE aligned.
7367 *
7368 * @returns Strict VBox status code.
7369 * @param pIemCpu The IEM per CPU data.
7370 * @param iSegReg The index of the segment register to use for
7371 * this access. The base and limits are checked.
7372 * @param GCPtrMem The address of the guest memory.
7373 * @param u128Value The value to store.
7374 */
7375static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7376{
7377 /* The lazy approach for now... */
7378 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7379 return iemRaiseGeneralProtectionFault0(pIemCpu);
7380
7381 uint128_t *pu128Dst;
7382 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7383 if (rc == VINF_SUCCESS)
7384 {
7385 *pu128Dst = u128Value;
7386 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7387 }
7388 return rc;
7389}
7390
7391
7392/**
7393 * Stores a descriptor register (sgdt, sidt).
7394 *
7395 * @returns Strict VBox status code.
7396 * @param pIemCpu The IEM per CPU data.
7397 * @param cbLimit The limit.
7398 * @param GCPtrBase The base address.
7399 * @param iSegReg The index of the segment register to use for
7400 * this access. The base and limits are checked.
7401 * @param GCPtrMem The address of the guest memory.
7402 * @param enmOpSize The effective operand size.
7403 */
7404static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
7405 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7406{
7407 uint8_t *pu8Src;
7408 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7409 (void **)&pu8Src,
7410 enmOpSize == IEMMODE_64BIT
7411 ? 2 + 8
7412 : enmOpSize == IEMMODE_32BIT
7413 ? 2 + 4
7414 : 2 + 3,
7415 iSegReg,
7416 GCPtrMem,
7417 IEM_ACCESS_DATA_W);
7418 if (rcStrict == VINF_SUCCESS)
7419 {
7420 pu8Src[0] = RT_BYTE1(cbLimit);
7421 pu8Src[1] = RT_BYTE2(cbLimit);
7422 pu8Src[2] = RT_BYTE1(GCPtrBase);
7423 pu8Src[3] = RT_BYTE2(GCPtrBase);
7424 pu8Src[4] = RT_BYTE3(GCPtrBase);
7425 if (enmOpSize == IEMMODE_16BIT)
7426 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7427 else
7428 {
7429 pu8Src[5] = RT_BYTE4(GCPtrBase);
7430 if (enmOpSize == IEMMODE_64BIT)
7431 {
7432 pu8Src[6] = RT_BYTE5(GCPtrBase);
7433 pu8Src[7] = RT_BYTE6(GCPtrBase);
7434 pu8Src[8] = RT_BYTE7(GCPtrBase);
7435 pu8Src[9] = RT_BYTE8(GCPtrBase);
7436 }
7437 }
7438 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7439 }
7440 return rcStrict;
7441}
7442
7443
7444/**
7445 * Pushes a word onto the stack.
7446 *
7447 * @returns Strict VBox status code.
7448 * @param pIemCpu The IEM per CPU data.
7449 * @param u16Value The value to push.
7450 */
7451static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7452{
7453 /* Decrement the stack pointer. */
7454 uint64_t uNewRsp;
7455 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7456 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7457
7458 /* Write the word the lazy way. */
7459 uint16_t *pu16Dst;
7460 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7461 if (rc == VINF_SUCCESS)
7462 {
7463 *pu16Dst = u16Value;
7464 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7465 }
7466
7467 /* Commit the new RSP value unless an access handler made trouble. */
7468 if (rc == VINF_SUCCESS)
7469 pCtx->rsp = uNewRsp;
7470
7471 return rc;
7472}
7473
7474
7475/**
7476 * Pushes a dword onto the stack.
7477 *
7478 * @returns Strict VBox status code.
7479 * @param pIemCpu The IEM per CPU data.
7480 * @param u32Value The value to push.
7481 */
7482static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7483{
7484 /* Decrement the stack pointer. */
7485 uint64_t uNewRsp;
7486 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7487 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7488
7489 /* Write the dword the lazy way. */
7490 uint32_t *pu32Dst;
7491 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7492 if (rc == VINF_SUCCESS)
7493 {
7494 *pu32Dst = u32Value;
7495 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7496 }
7497
7498 /* Commit the new RSP value unless an access handler made trouble. */
7499 if (rc == VINF_SUCCESS)
7500 pCtx->rsp = uNewRsp;
7501
7502 return rc;
7503}
7504
7505
7506/**
7507 * Pushes a dword segment register value onto the stack.
7508 *
7509 * @returns Strict VBox status code.
7510 * @param pIemCpu The IEM per CPU data.
7511 * @param u32Value The value to push.
7512 */
7513static VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7514{
7515 /* Decrement the stack pointer. */
7516 uint64_t uNewRsp;
7517 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7518 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7519
7520 VBOXSTRICTRC rc;
7521 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7522 {
7523 /* The recompiler writes a full dword. */
7524 uint32_t *pu32Dst;
7525 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7526 if (rc == VINF_SUCCESS)
7527 {
7528 *pu32Dst = u32Value;
7529 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7530 }
7531 }
7532 else
7533 {
7534 /* The Intel docs talk about zero extending the selector register
7535 value. The actual Intel CPU tested here might be zero extending the
7536 value, but it still only writes the lower word... */
7537 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7538 * happens when crossing an electric page boundary, is the high word
7539 * checked for write accessibility or not? Probably it is. What about
7540 * segment limits? */
7541 uint16_t *pu16Dst;
7542 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7543 if (rc == VINF_SUCCESS)
7544 {
7545 *pu16Dst = (uint16_t)u32Value;
7546 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7547 }
7548 }
7549
7550 /* Commit the new RSP value unless an access handler made trouble. */
7551 if (rc == VINF_SUCCESS)
7552 pCtx->rsp = uNewRsp;
7553
7554 return rc;
7555}
7556
7557
7558/**
7559 * Pushes a qword onto the stack.
7560 *
7561 * @returns Strict VBox status code.
7562 * @param pIemCpu The IEM per CPU data.
7563 * @param u64Value The value to push.
7564 */
7565static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7566{
7567 /* Decrement the stack pointer. */
7568 uint64_t uNewRsp;
7569 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7570 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7571
7572 /* Write the qword the lazy way. */
7573 uint64_t *pu64Dst;
7574 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7575 if (rc == VINF_SUCCESS)
7576 {
7577 *pu64Dst = u64Value;
7578 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7579 }
7580
7581 /* Commit the new RSP value unless an access handler made trouble. */
7582 if (rc == VINF_SUCCESS)
7583 pCtx->rsp = uNewRsp;
7584
7585 return rc;
7586}
7587
7588
7589/**
7590 * Pops a word from the stack.
7591 *
7592 * @returns Strict VBox status code.
7593 * @param pIemCpu The IEM per CPU data.
7594 * @param pu16Value Where to store the popped value.
7595 */
7596static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7597{
7598 /* Increment the stack pointer. */
7599 uint64_t uNewRsp;
7600 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7601 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7602
7603 /* Fetch the word the lazy way. */
7604 uint16_t const *pu16Src;
7605 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7606 if (rc == VINF_SUCCESS)
7607 {
7608 *pu16Value = *pu16Src;
7609 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7610
7611 /* Commit the new RSP value. */
7612 if (rc == VINF_SUCCESS)
7613 pCtx->rsp = uNewRsp;
7614 }
7615
7616 return rc;
7617}
7618
7619
7620/**
7621 * Pops a dword from the stack.
7622 *
7623 * @returns Strict VBox status code.
7624 * @param pIemCpu The IEM per CPU data.
7625 * @param pu32Value Where to store the popped value.
7626 */
7627static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7628{
7629 /* Increment the stack pointer. */
7630 uint64_t uNewRsp;
7631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7632 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7633
7634 /* Fetch the dword the lazy way. */
7635 uint32_t const *pu32Src;
7636 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7637 if (rc == VINF_SUCCESS)
7638 {
7639 *pu32Value = *pu32Src;
7640 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7641
7642 /* Commit the new RSP value. */
7643 if (rc == VINF_SUCCESS)
7644 pCtx->rsp = uNewRsp;
7645 }
7646
7647 return rc;
7648}
7649
7650
7651/**
7652 * Pops a qword from the stack.
7653 *
7654 * @returns Strict VBox status code.
7655 * @param pIemCpu The IEM per CPU data.
7656 * @param pu64Value Where to store the popped value.
7657 */
7658static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7659{
7660 /* Increment the stack pointer. */
7661 uint64_t uNewRsp;
7662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7663 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7664
7665 /* Fetch the qword the lazy way. */
7666 uint64_t const *pu64Src;
7667 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7668 if (rc == VINF_SUCCESS)
7669 {
7670 *pu64Value = *pu64Src;
7671 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7672
7673 /* Commit the new RSP value. */
7674 if (rc == VINF_SUCCESS)
7675 pCtx->rsp = uNewRsp;
7676 }
7677
7678 return rc;
7679}
7680
7681
7682/**
7683 * Pushes a word onto the stack, using a temporary stack pointer.
7684 *
7685 * @returns Strict VBox status code.
7686 * @param pIemCpu The IEM per CPU data.
7687 * @param u16Value The value to push.
7688 * @param pTmpRsp Pointer to the temporary stack pointer.
7689 */
7690static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7691{
7692 /* Decrement the stack pointer. */
7693 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7694 RTUINT64U NewRsp = *pTmpRsp;
7695 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7696
7697 /* Write the word the lazy way. */
7698 uint16_t *pu16Dst;
7699 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7700 if (rc == VINF_SUCCESS)
7701 {
7702 *pu16Dst = u16Value;
7703 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7704 }
7705
7706 /* Commit the new RSP value unless an access handler made trouble. */
7707 if (rc == VINF_SUCCESS)
7708 *pTmpRsp = NewRsp;
7709
7710 return rc;
7711}
7712
7713
7714/**
7715 * Pushes a dword onto the stack, using a temporary stack pointer.
7716 *
7717 * @returns Strict VBox status code.
7718 * @param pIemCpu The IEM per CPU data.
7719 * @param u32Value The value to push.
7720 * @param pTmpRsp Pointer to the temporary stack pointer.
7721 */
7722static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7723{
7724 /* Decrement the stack pointer. */
7725 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7726 RTUINT64U NewRsp = *pTmpRsp;
7727 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7728
7729 /* Write the dword the lazy way. */
7730 uint32_t *pu32Dst;
7731 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7732 if (rc == VINF_SUCCESS)
7733 {
7734 *pu32Dst = u32Value;
7735 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7736 }
7737
7738 /* Commit the new RSP value unless an access handler made trouble. */
7739 if (rc == VINF_SUCCESS)
7740 *pTmpRsp = NewRsp;
7741
7742 return rc;
7743}
7744
7745
7746/**
7747 * Pushes a qword onto the stack, using a temporary stack pointer.
7748 *
7749 * @returns Strict VBox status code.
7750 * @param pIemCpu The IEM per CPU data.
7751 * @param u64Value The value to push.
7752 * @param pTmpRsp Pointer to the temporary stack pointer.
7753 */
7754static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7755{
7756 /* Decrement the stack pointer. */
7757 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7758 RTUINT64U NewRsp = *pTmpRsp;
7759 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7760
7761 /* Write the qword the lazy way. */
7762 uint64_t *pu64Dst;
7763 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7764 if (rc == VINF_SUCCESS)
7765 {
7766 *pu64Dst = u64Value;
7767 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7768 }
7769
7770 /* Commit the new RSP value unless an access handler made trouble. */
7771 if (rc == VINF_SUCCESS)
7772 *pTmpRsp = NewRsp;
7773
7774 return rc;
7775}
7776
7777
7778/**
7779 * Pops a word from the stack, using a temporary stack pointer.
7780 *
7781 * @returns Strict VBox status code.
7782 * @param pIemCpu The IEM per CPU data.
7783 * @param pu16Value Where to store the popped value.
7784 * @param pTmpRsp Pointer to the temporary stack pointer.
7785 */
7786static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7787{
7788 /* Increment the stack pointer. */
7789 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7790 RTUINT64U NewRsp = *pTmpRsp;
7791 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7792
7793 /* Fetch the word the lazy way. */
7794 uint16_t const *pu16Src;
7795 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7796 if (rc == VINF_SUCCESS)
7797 {
7798 *pu16Value = *pu16Src;
7799 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7800
7801 /* Commit the new RSP value. */
7802 if (rc == VINF_SUCCESS)
7803 *pTmpRsp = NewRsp;
7804 }
7805
7806 return rc;
7807}
7808
7809
7810/**
7811 * Pops a dword from the stack, using a temporary stack pointer.
7812 *
7813 * @returns Strict VBox status code.
7814 * @param pIemCpu The IEM per CPU data.
7815 * @param pu32Value Where to store the popped value.
7816 * @param pTmpRsp Pointer to the temporary stack pointer.
7817 */
7818static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7819{
7820 /* Increment the stack pointer. */
7821 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7822 RTUINT64U NewRsp = *pTmpRsp;
7823 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7824
7825 /* Fetch the dword the lazy way. */
7826 uint32_t const *pu32Src;
7827 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7828 if (rc == VINF_SUCCESS)
7829 {
7830 *pu32Value = *pu32Src;
7831 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7832
7833 /* Commit the new RSP value. */
7834 if (rc == VINF_SUCCESS)
7835 *pTmpRsp = NewRsp;
7836 }
7837
7838 return rc;
7839}
7840
7841
7842/**
7843 * Pops a qword from the stack, using a temporary stack pointer.
7844 *
7845 * @returns Strict VBox status code.
7846 * @param pIemCpu The IEM per CPU data.
7847 * @param pu64Value Where to store the popped value.
7848 * @param pTmpRsp Pointer to the temporary stack pointer.
7849 */
7850static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7851{
7852 /* Increment the stack pointer. */
7853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7854 RTUINT64U NewRsp = *pTmpRsp;
7855 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7856
7857 /* Fetch the qword the lazy way. */
7858 uint64_t const *pu64Src;
7859 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7860 if (rcStrict == VINF_SUCCESS)
7861 {
7862 *pu64Value = *pu64Src;
7863 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7864
7865 /* Commit the new RSP value. */
7866 if (rcStrict == VINF_SUCCESS)
7867 *pTmpRsp = NewRsp;
7868 }
7869
7870 return rcStrict;
7871}
7872
7873
7874/**
7875 * Begin a special stack push (used by interrupts, exceptions and such).
7876 *
7877 * This will raise \#SS or \#PF if appropriate.
7878 *
7879 * @returns Strict VBox status code.
7880 * @param pIemCpu The IEM per CPU data.
7881 * @param cbMem The number of bytes to push onto the stack.
7882 * @param ppvMem Where to return the pointer to the stack memory.
7883 * As with the other memory functions this could be
7884 * direct access or bounce buffered access, so
7885 * don't commit register until the commit call
7886 * succeeds.
7887 * @param puNewRsp Where to return the new RSP value. This must be
7888 * passed unchanged to
7889 * iemMemStackPushCommitSpecial().
7890 */
7891static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
7892{
7893 Assert(cbMem < UINT8_MAX);
7894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7895 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7896 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7897}
7898
7899
7900/**
7901 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7902 *
7903 * This will update the rSP.
7904 *
7905 * @returns Strict VBox status code.
7906 * @param pIemCpu The IEM per CPU data.
7907 * @param pvMem The pointer returned by
7908 * iemMemStackPushBeginSpecial().
7909 * @param uNewRsp The new RSP value returned by
7910 * iemMemStackPushBeginSpecial().
7911 */
7912static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
7913{
7914 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
7915 if (rcStrict == VINF_SUCCESS)
7916 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7917 return rcStrict;
7918}
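
/*
 * Minimal usage sketch (not part of the original source) of the special push
 * pair above, roughly as an exception dispatcher might use it.  The frame
 * layout and variable names are purely illustrative.
 *
 *   uint64_t     uNewRsp;
 *   uint32_t    *pau32Frame;
 *   VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
 *                                                       (void **)&pau32Frame, &uNewRsp);
 *   if (rcStrict != VINF_SUCCESS)
 *       return rcStrict;
 *   pau32Frame[0] = uIllustrativeEip;      // fill in the frame (illustrative values)...
 *   pau32Frame[1] = uIllustrativeCs;
 *   pau32Frame[2] = uIllustrativeEfl;
 *   rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pau32Frame, uNewRsp); // commits RSP on success
 */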
7919
7920
7921/**
7922 * Begin a special stack pop (used by iret, retf and such).
7923 *
7924 * This will raise \#SS or \#PF if appropriate.
7925 *
7926 * @returns Strict VBox status code.
7927 * @param pIemCpu The IEM per CPU data.
7928 * @param cbMem The number of bytes to pop from the stack.
7929 * @param ppvMem Where to return the pointer to the stack memory.
7930 * @param puNewRsp Where to return the new RSP value. This must be
7931 * passed unchanged to
7932 * iemMemStackPopCommitSpecial() or applied
7933 * manually if iemMemStackPopDoneSpecial() is used.
7934 */
7935static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7936{
7937 Assert(cbMem < UINT8_MAX);
7938 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7939 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7940 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7941}
7942
7943
7944/**
7945 * Continue a special stack pop (used by iret and retf).
7946 *
7947 * This will raise \#SS or \#PF if appropriate.
7948 *
7949 * @returns Strict VBox status code.
7950 * @param pIemCpu The IEM per CPU data.
7951 * @param cbMem The number of bytes to pop from the stack.
7952 * @param ppvMem Where to return the pointer to the stack memory.
7953 * @param puNewRsp Where to return the new RSP value. This must be
7954 * passed unchanged to
7955 * iemMemStackPopCommitSpecial() or applied
7956 * manually if iemMemStackPopDoneSpecial() is used.
7957 */
7958static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7959{
7960 Assert(cbMem < UINT8_MAX);
7961 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7962 RTUINT64U NewRsp;
7963 NewRsp.u = *puNewRsp;
7964 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7965 *puNewRsp = NewRsp.u;
7966 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7967}
7968
7969
7970/**
7971 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
7972 *
7973 * This will update the rSP.
7974 *
7975 * @returns Strict VBox status code.
7976 * @param pIemCpu The IEM per CPU data.
7977 * @param pvMem The pointer returned by
7978 * iemMemStackPopBeginSpecial().
7979 * @param uNewRsp The new RSP value returned by
7980 * iemMemStackPopBeginSpecial().
7981 */
7982static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
7983{
7984 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7985 if (rcStrict == VINF_SUCCESS)
7986 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7987 return rcStrict;
7988}
7989
7990
7991/**
7992 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7993 * iemMemStackPopContinueSpecial).
7994 *
7995 * The caller will manually commit the rSP.
7996 *
7997 * @returns Strict VBox status code.
7998 * @param pIemCpu The IEM per CPU data.
7999 * @param pvMem The pointer returned by
8000 * iemMemStackPopBeginSpecial() or
8001 * iemMemStackPopContinueSpecial().
8002 */
8003static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8004{
8005 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8006}
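
/*
 * Minimal usage sketch (not part of the original source) of the special pop
 * helpers, using the "done" variant where the caller commits RSP manually.
 * Variable names are illustrative only.
 *
 *   uint64_t        uNewRsp;
 *   uint32_t const *pau32Frame;
 *   VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
 *                                                         (void const **)&pau32Frame, &uNewRsp);
 *   if (rcStrict != VINF_SUCCESS)
 *       return rcStrict;
 *   // ... consume pau32Frame[0..2] ...
 *   rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pau32Frame);
 *   if (rcStrict == VINF_SUCCESS)
 *       pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;    // manual RSP commit
 */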
8007
8008
8009/**
8010 * Fetches a system table byte.
8011 *
8012 * @returns Strict VBox status code.
8013 * @param pIemCpu The IEM per CPU data.
8014 * @param pbDst Where to return the byte.
8015 * @param iSegReg The index of the segment register to use for
8016 * this access. The base and limits are checked.
8017 * @param GCPtrMem The address of the guest memory.
8018 */
8019static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8020{
8021 /* The lazy approach for now... */
8022 uint8_t const *pbSrc;
8023 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8024 if (rc == VINF_SUCCESS)
8025 {
8026 *pbDst = *pbSrc;
8027 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8028 }
8029 return rc;
8030}
8031
8032
8033/**
8034 * Fetches a system table word.
8035 *
8036 * @returns Strict VBox status code.
8037 * @param pIemCpu The IEM per CPU data.
8038 * @param pu16Dst Where to return the word.
8039 * @param iSegReg The index of the segment register to use for
8040 * this access. The base and limits are checked.
8041 * @param GCPtrMem The address of the guest memory.
8042 */
8043static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8044{
8045 /* The lazy approach for now... */
8046 uint16_t const *pu16Src;
8047 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8048 if (rc == VINF_SUCCESS)
8049 {
8050 *pu16Dst = *pu16Src;
8051 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8052 }
8053 return rc;
8054}
8055
8056
8057/**
8058 * Fetches a system table dword.
8059 *
8060 * @returns Strict VBox status code.
8061 * @param pIemCpu The IEM per CPU data.
8062 * @param pu32Dst Where to return the dword.
8063 * @param iSegReg The index of the segment register to use for
8064 * this access. The base and limits are checked.
8065 * @param GCPtrMem The address of the guest memory.
8066 */
8067static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8068{
8069 /* The lazy approach for now... */
8070 uint32_t const *pu32Src;
8071 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8072 if (rc == VINF_SUCCESS)
8073 {
8074 *pu32Dst = *pu32Src;
8075 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8076 }
8077 return rc;
8078}
8079
8080
8081/**
8082 * Fetches a system table qword.
8083 *
8084 * @returns Strict VBox status code.
8085 * @param pIemCpu The IEM per CPU data.
8086 * @param pu64Dst Where to return the qword.
8087 * @param iSegReg The index of the segment register to use for
8088 * this access. The base and limits are checked.
8089 * @param GCPtrMem The address of the guest memory.
8090 */
8091static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8092{
8093 /* The lazy approach for now... */
8094 uint64_t const *pu64Src;
8095 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8096 if (rc == VINF_SUCCESS)
8097 {
8098 *pu64Dst = *pu64Src;
8099 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8100 }
8101 return rc;
8102}
8103
8104
8105/**
8106 * Fetches a descriptor table entry with caller specified error code.
8107 *
8108 * @returns Strict VBox status code.
8109 * @param pIemCpu The IEM per CPU.
8110 * @param pDesc Where to return the descriptor table entry.
8111 * @param uSel The selector which table entry to fetch.
8112 * @param uXcpt The exception to raise on table lookup error.
8113 * @param uErrorCode The error code associated with the exception.
8114 */
8115static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt,
8116 uint16_t uErrorCode)
8117{
8118 AssertPtr(pDesc);
8119 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8120
8121 /** @todo did the 286 require all 8 bytes to be accessible? */
8122 /*
8123 * Get the selector table base and check bounds.
8124 */
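 /* Note: ORing the TI and RPL bits into the selector yields the offset of the
    descriptor's last byte, so a single compare against the (inclusive) table
    limit below covers the whole 8-byte entry. */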
8125 RTGCPTR GCPtrBase;
8126 if (uSel & X86_SEL_LDT)
8127 {
8128 if ( !pCtx->ldtr.Attr.n.u1Present
8129 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8130 {
8131 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8132 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8133 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8134 uErrorCode, 0);
8135 }
8136
8137 Assert(pCtx->ldtr.Attr.n.u1Present);
8138 GCPtrBase = pCtx->ldtr.u64Base;
8139 }
8140 else
8141 {
8142 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8143 {
8144 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8145 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8146 uErrorCode, 0);
8147 }
8148 GCPtrBase = pCtx->gdtr.pGdt;
8149 }
8150
8151 /*
8152 * Read the legacy descriptor and maybe the long mode extensions if
8153 * required.
8154 */
8155 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8156 if (rcStrict == VINF_SUCCESS)
8157 {
8158 if ( !IEM_IS_LONG_MODE(pIemCpu)
8159 || pDesc->Legacy.Gen.u1DescType)
8160 pDesc->Long.au64[1] = 0;
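 /* Note: (uSel | X86_SEL_RPL_LDT) + 1 equals the descriptor offset plus 8,
    i.e. the address of the upper half of the 16-byte long mode system entry. */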
8161 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8162 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8163 else
8164 {
8165 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8166 /** @todo is this the right exception? */
8167 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8168 }
8169 }
8170 return rcStrict;
8171}
8172
8173
8174/**
8175 * Fetches a descriptor table entry.
8176 *
8177 * @returns Strict VBox status code.
8178 * @param pIemCpu The IEM per CPU.
8179 * @param pDesc Where to return the descriptor table entry.
8180 * @param uSel The selector whose table entry to fetch.
8181 * @param uXcpt The exception to raise on table lookup error.
8182 */
8183static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8184{
8185 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8186}
8187
8188
8189/**
8190 * Fakes a long mode stack segment descriptor for SS = 0.
8191 *
8192 * @param pDescSs Where to return the fake stack descriptor.
8193 * @param uDpl The DPL we want.
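 * @remarks In 64-bit mode SS may legally be a null selector outside ring-3
 *          (e.g. after an interrupt stack switch to ring 0), which is why an
 *          accessed, read/write, present data descriptor is faked here.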
8194 */
8195static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8196{
8197 pDescSs->Long.au64[0] = 0;
8198 pDescSs->Long.au64[1] = 0;
8199 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8200 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8201 pDescSs->Long.Gen.u2Dpl = uDpl;
8202 pDescSs->Long.Gen.u1Present = 1;
8203 pDescSs->Long.Gen.u1Long = 1;
8204}
8205
8206
8207/**
8208 * Marks the selector descriptor as accessed (only non-system descriptors).
8209 *
8210 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8211 * will therefore skip the limit checks.
8212 *
8213 * @returns Strict VBox status code.
8214 * @param pIemCpu The IEM per CPU.
8215 * @param uSel The selector.
8216 */
8217static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8218{
8219 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8220
8221 /*
8222 * Get the selector table base and calculate the entry address.
8223 */
8224 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8225 ? pCtx->ldtr.u64Base
8226 : pCtx->gdtr.pGdt;
8227 GCPtr += uSel & X86_SEL_MASK;
8228
8229 /*
8230 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8231 * ugly stuff to avoid this. This will make sure it's an atomic access
8232 * as well as more or less removing any question about 8-bit or 32-bit accesses.
8233 */
8234 VBOXSTRICTRC rcStrict;
8235 uint32_t volatile *pu32;
8236 if ((GCPtr & 3) == 0)
8237 {
8238 /* The normal case, map the 32 bits containing the accessed bit (bit 40). */
8239 GCPtr += 2 + 2;
8240 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8241 if (rcStrict != VINF_SUCCESS)
8242 return rcStrict;
8243 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8244 }
8245 else
8246 {
8247 /* The misaligned GDT/LDT case, map the whole thing. */
8248 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8249 if (rcStrict != VINF_SUCCESS)
8250 return rcStrict;
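 /* The accessed flag is bit 40 of the descriptor.  Step the mapped pointer
    forward by whole bytes until it is 32-bit aligned and subtract the bits
    skipped (8 per byte) from the bit index passed to ASMAtomicBitSet. */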
8251 switch ((uintptr_t)pu32 & 3)
8252 {
8253 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8254 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8255 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8256 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8257 }
8258 }
8259
8260 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8261}
8262
8263/** @} */
8264
8265
8266/*
8267 * Include the C/C++ implementations of the instructions.
8268 */
8269#include "IEMAllCImpl.cpp.h"
8270
8271
8272
8273/** @name "Microcode" macros.
8274 *
8275 * The idea is that we should be able to use the same code to interpret
8276 * instructions as well as to generate recompiler code. Thus this obfuscation.
8277 *
8278 * @{
8279 */
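/* Illustrative sketch only (not lifted from the opcode decoders): a simple
 * register-form binary instruction is typically expressed with these macros
 * roughly as follows, where the worker name (iemAImpl_xxx_u16) and register
 * indexes (iGRegDst, iGRegSrc) are hypothetical placeholders:
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *      IEM_MC_ARG(uint16_t,   u16Src,   1);
 *      IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *      IEM_MC_REF_GREG_U16(pu16Dst, iGRegDst);
 *      IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xxx_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */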
8280#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8281#define IEM_MC_END() }
8282#define IEM_MC_PAUSE() do {} while (0)
8283#define IEM_MC_CONTINUE() do {} while (0)
8284
8285/** Internal macro. */
8286#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8287 do \
8288 { \
8289 VBOXSTRICTRC rcStrict2 = a_Expr; \
8290 if (rcStrict2 != VINF_SUCCESS) \
8291 return rcStrict2; \
8292 } while (0)
8293
8294#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8295#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8296#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8297#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8298#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8299#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8300#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8301
8302#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8303#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8304 do { \
8305 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8306 return iemRaiseDeviceNotAvailable(pIemCpu); \
8307 } while (0)
8308#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8309 do { \
8310 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
8311 return iemRaiseMathFault(pIemCpu); \
8312 } while (0)
8313#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8314 do { \
8315 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8316 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFSXR) \
8317 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2) ) \
8318 return iemRaiseUndefinedOpcode(pIemCpu); \
8319 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8320 return iemRaiseDeviceNotAvailable(pIemCpu); \
8321 } while (0)
8322#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8323 do { \
8324 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8325 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX) ) \
8326 return iemRaiseUndefinedOpcode(pIemCpu); \
8327 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8328 return iemRaiseDeviceNotAvailable(pIemCpu); \
8329 } while (0)
8330#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8331 do { \
8332 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8333 || ( !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE) \
8334 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_AXMMX) ) ) \
8335 return iemRaiseUndefinedOpcode(pIemCpu); \
8336 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8337 return iemRaiseDeviceNotAvailable(pIemCpu); \
8338 } while (0)
8339#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8340 do { \
8341 if (pIemCpu->uCpl != 0) \
8342 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8343 } while (0)
8344
8345
8346#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8347#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8348#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8349#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8350#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8351#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8352#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8353 uint32_t a_Name; \
8354 uint32_t *a_pName = &a_Name
8355#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8356 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8357
8358#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8359#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8360
8361#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8362#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8363#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8364#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8365#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8366#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8367#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8368#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8369#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8370#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8371#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8372#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8373#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8374#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8375#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8376#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8377#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8378#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8379#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8380#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8381#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8382#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8383#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8384#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8385#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8386#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8387#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8388#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8389#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8390/** @note Not for IOPL or IF testing or modification. */
8391#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8392#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8393#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
8394#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
8395
8396#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8397#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8398#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8399#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8400#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8401#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8402#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8403#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8404#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8405#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8406#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8407 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8408
8409#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8410#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8411/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8412 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8413#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8414#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8415/** @note Not for IOPL or IF testing or modification. */
8416#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8417
8418#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8419#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8420#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8421 do { \
8422 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8423 *pu32Reg += (a_u32Value); \
8424 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8425 } while (0)
8426#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8427
8428#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8429#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8430#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8431 do { \
8432 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8433 *pu32Reg -= (a_u32Value); \
8434 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8435 } while (0)
8436#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8437
8438#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8439#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8440#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8441#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8442#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8443#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8444#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8445
8446#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8447#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8448#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8449#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8450
8451#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8452#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8453#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8454
8455#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8456#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8457
8458#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8459#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8460#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8461
8462#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8463#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8464#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8465
8466#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8467
8468#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8469
8470#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8471#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8472#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8473 do { \
8474 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8475 *pu32Reg &= (a_u32Value); \
8476 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8477 } while (0)
8478#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8479
8480#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8481#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8482#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8483 do { \
8484 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8485 *pu32Reg |= (a_u32Value); \
8486 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8487 } while (0)
8488#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8489
8490
8491/** @note Not for IOPL or IF modification. */
8492#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8493/** @note Not for IOPL or IF modification. */
8494#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8495/** @note Not for IOPL or IF modification. */
8496#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8497
8498#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8499
8500
8501#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8502 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx; } while (0)
8503#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8504 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].au32[0]; } while (0)
8505#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8506 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8507#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8508 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8509#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8510 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
8511#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8512 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
8513#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8514 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
8515
8516#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8517 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm; } while (0)
8518#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8519 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0]; } while (0)
8520#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8521 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au32[0]; } while (0)
8522#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8523 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8524#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8525 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8526 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
8527 } while (0)
8528#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8529 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8530 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
8531 } while (0)
8532#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8533 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
8534#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8535 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
8536#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8537 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0])
8538
8539#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8540 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8541#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8542 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8543#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8544 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8545
8546#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8547 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8548#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8549 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8550#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8551 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8552
8553#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8554 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8555#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8556 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8557#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8558 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8559
8560#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8561 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8562
8563#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8564 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8565#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8566 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8567#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8568 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8569
8570#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8571 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8572#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8573 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8574#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8575 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8576
8577#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8578 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8579#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8580 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8581
8582
8583
8584#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8585 do { \
8586 uint8_t u8Tmp; \
8587 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8588 (a_u16Dst) = u8Tmp; \
8589 } while (0)
8590#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8591 do { \
8592 uint8_t u8Tmp; \
8593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8594 (a_u32Dst) = u8Tmp; \
8595 } while (0)
8596#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8597 do { \
8598 uint8_t u8Tmp; \
8599 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8600 (a_u64Dst) = u8Tmp; \
8601 } while (0)
8602#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8603 do { \
8604 uint16_t u16Tmp; \
8605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8606 (a_u32Dst) = u16Tmp; \
8607 } while (0)
8608#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8609 do { \
8610 uint16_t u16Tmp; \
8611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8612 (a_u64Dst) = u16Tmp; \
8613 } while (0)
8614#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8615 do { \
8616 uint32_t u32Tmp; \
8617 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8618 (a_u64Dst) = u32Tmp; \
8619 } while (0)
8620
8621#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8622 do { \
8623 uint8_t u8Tmp; \
8624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8625 (a_u16Dst) = (int8_t)u8Tmp; \
8626 } while (0)
8627#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8628 do { \
8629 uint8_t u8Tmp; \
8630 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8631 (a_u32Dst) = (int8_t)u8Tmp; \
8632 } while (0)
8633#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8634 do { \
8635 uint8_t u8Tmp; \
8636 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8637 (a_u64Dst) = (int8_t)u8Tmp; \
8638 } while (0)
8639#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8640 do { \
8641 uint16_t u16Tmp; \
8642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8643 (a_u32Dst) = (int16_t)u16Tmp; \
8644 } while (0)
8645#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8646 do { \
8647 uint16_t u16Tmp; \
8648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8649 (a_u64Dst) = (int16_t)u16Tmp; \
8650 } while (0)
8651#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8652 do { \
8653 uint32_t u32Tmp; \
8654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8655 (a_u64Dst) = (int32_t)u32Tmp; \
8656 } while (0)
8657
8658#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8659 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8660#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8661 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8662#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8663 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8664#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8665 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8666
8667#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8668 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8669#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8670 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8671#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8672 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8673#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8674 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8675
8676#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8677#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8678#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8679#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8680#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8681#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8682#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8683 do { \
8684 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8685 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8686 } while (0)
8687
8688#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8689 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8690#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8691 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8692
8693
8694#define IEM_MC_PUSH_U16(a_u16Value) \
8695 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8696#define IEM_MC_PUSH_U32(a_u32Value) \
8697 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8698#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8699 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8700#define IEM_MC_PUSH_U64(a_u64Value) \
8701 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8702
8703#define IEM_MC_POP_U16(a_pu16Value) \
8704 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8705#define IEM_MC_POP_U32(a_pu32Value) \
8706 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8707#define IEM_MC_POP_U64(a_pu64Value) \
8708 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8709
8710/** Maps guest memory for direct or bounce buffered access.
8711 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8712 * @remarks May return.
8713 */
8714#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8715 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
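/* Hypothetical usage sketch: map, modify and commit a read/write data operand
 * (the local names GCPtrEffDst and pu32Dst are illustrative only):
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      ... operate on *pu32Dst ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 */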
8716
8717/** Maps guest memory for direct or bounce buffered access.
8718 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8719 * @remarks May return.
8720 */
8721#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8722 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8723
8724/** Commits the memory and unmaps the guest memory.
8725 * @remarks May return.
8726 */
8727#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8728 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8729
8730/** Commits the memory and unmaps the guest memory unless the FPU status word
8731 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8732 * would cause FLD not to store.
8733 *
8734 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8735 * store, while \#P will not.
8736 *
8737 * @remarks May in theory return - for now.
8738 */
8739#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8740 do { \
8741 if ( !(a_u16FSW & X86_FSW_ES) \
8742 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8743 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
8744 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8745 } while (0)
8746
8747/** Calculate effective address from R/M. */
8748#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8749 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8750
8751#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8752#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8753#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8754#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8755#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8756#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8757#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8758
8759/**
8760 * Defers the rest of the instruction emulation to a C implementation routine
8761 * and returns, only taking the standard parameters.
8762 *
8763 * @param a_pfnCImpl The pointer to the C routine.
8764 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8765 */
8766#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
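/* Note: pIemCpu->offOpcode is the number of opcode bytes consumed at this
 * point, i.e. the instruction length, which the C implementation routine
 * receives as its cbInstr parameter (see the IEM_CIMPL_DEF_* macros). */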
8767
8768/**
8769 * Defers the rest of instruction emulation to a C implementation routine and
8770 * returns, taking one argument in addition to the standard ones.
8771 *
8772 * @param a_pfnCImpl The pointer to the C routine.
8773 * @param a0 The argument.
8774 */
8775#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8776
8777/**
8778 * Defers the rest of the instruction emulation to a C implementation routine
8779 * and returns, taking two arguments in addition to the standard ones.
8780 *
8781 * @param a_pfnCImpl The pointer to the C routine.
8782 * @param a0 The first extra argument.
8783 * @param a1 The second extra argument.
8784 */
8785#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8786
8787/**
8788 * Defers the rest of the instruction emulation to a C implementation routine
8789 * and returns, taking three arguments in addition to the standard ones.
8790 *
8791 * @param a_pfnCImpl The pointer to the C routine.
8792 * @param a0 The first extra argument.
8793 * @param a1 The second extra argument.
8794 * @param a2 The third extra argument.
8795 */
8796#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8797
8798/**
8799 * Defers the rest of the instruction emulation to a C implementation routine
8800 * and returns, taking four arguments in addition to the standard ones.
8801 *
8802 * @param a_pfnCImpl The pointer to the C routine.
8803 * @param a0 The first extra argument.
8804 * @param a1 The second extra argument.
8805 * @param a2 The third extra argument.
8806 * @param a3 The fourth extra argument.
8807 */
8808#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8809
8810/**
8811 * Defers the rest of the instruction emulation to a C implementation routine
8812 * and returns, taking five arguments in addition to the standard ones.
8813 *
8814 * @param a_pfnCImpl The pointer to the C routine.
8815 * @param a0 The first extra argument.
8816 * @param a1 The second extra argument.
8817 * @param a2 The third extra argument.
8818 * @param a3 The fourth extra argument.
8819 * @param a4 The fifth extra argument.
8820 */
8821#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8822
8823/**
8824 * Defers the entire instruction emulation to a C implementation routine and
8825 * returns, only taking the standard parameters.
8826 *
8827 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8828 *
8829 * @param a_pfnCImpl The pointer to the C routine.
8830 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8831 */
8832#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8833
8834/**
8835 * Defers the entire instruction emulation to a C implementation routine and
8836 * returns, taking one argument in addition to the standard ones.
8837 *
8838 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8839 *
8840 * @param a_pfnCImpl The pointer to the C routine.
8841 * @param a0 The argument.
8842 */
8843#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8844
8845/**
8846 * Defers the entire instruction emulation to a C implementation routine and
8847 * returns, taking two arguments in addition to the standard ones.
8848 *
8849 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8850 *
8851 * @param a_pfnCImpl The pointer to the C routine.
8852 * @param a0 The first extra argument.
8853 * @param a1 The second extra argument.
8854 */
8855#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8856
8857/**
8858 * Defers the entire instruction emulation to a C implementation routine and
8859 * returns, taking three arguments in addition to the standard ones.
8860 *
8861 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8862 *
8863 * @param a_pfnCImpl The pointer to the C routine.
8864 * @param a0 The first extra argument.
8865 * @param a1 The second extra argument.
8866 * @param a2 The third extra argument.
8867 */
8868#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8869
8870/**
8871 * Calls a FPU assembly implementation taking one visible argument.
8872 *
8873 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8874 * @param a0 The first extra argument.
8875 */
8876#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8877 do { \
8878 iemFpuPrepareUsage(pIemCpu); \
8879 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
8880 } while (0)
8881
8882/**
8883 * Calls a FPU assembly implementation taking two visible arguments.
8884 *
8885 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8886 * @param a0 The first extra argument.
8887 * @param a1 The second extra argument.
8888 */
8889#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
8890 do { \
8891 iemFpuPrepareUsage(pIemCpu); \
8892 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
8893 } while (0)
8894
8895/**
8896 * Calls a FPU assembly implementation taking three visible arguments.
8897 *
8898 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8899 * @param a0 The first extra argument.
8900 * @param a1 The second extra argument.
8901 * @param a2 The third extra argument.
8902 */
8903#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
8904 do { \
8905 iemFpuPrepareUsage(pIemCpu); \
8906 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
8907 } while (0)
8908
8909#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
8910 do { \
8911 (a_FpuData).FSW = (a_FSW); \
8912 (a_FpuData).r80Result = *(a_pr80Value); \
8913 } while (0)
8914
8915/** Pushes FPU result onto the stack. */
8916#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
8917 iemFpuPushResult(pIemCpu, &a_FpuData)
8918/** Pushes FPU result onto the stack and sets the FPUDP. */
8919#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
8920 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
8921
8922/** Replaces ST0 with value one and pushes value two onto the FPU stack. */
8923#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
8924 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
8925
8926/** Stores FPU result in a stack register. */
8927#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
8928 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
8929/** Stores FPU result in a stack register and pops the stack. */
8930#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
8931 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
8932/** Stores FPU result in a stack register and sets the FPUDP. */
8933#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8934 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
8935/** Stores FPU result in a stack register, sets the FPUDP, and pops the
8936 * stack. */
8937#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8938 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
8939
8940/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
8941#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
8942 iemFpuUpdateOpcodeAndIp(pIemCpu)
8943/** Free a stack register (for FFREE and FFREEP). */
8944#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
8945 iemFpuStackFree(pIemCpu, a_iStReg)
8946/** Increment the FPU stack pointer. */
8947#define IEM_MC_FPU_STACK_INC_TOP() \
8948 iemFpuStackIncTop(pIemCpu)
8949/** Decrement the FPU stack pointer. */
8950#define IEM_MC_FPU_STACK_DEC_TOP() \
8951 iemFpuStackDecTop(pIemCpu)
8952
8953/** Updates the FSW, FOP, FPUIP, and FPUCS. */
8954#define IEM_MC_UPDATE_FSW(a_u16FSW) \
8955 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8956/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
8957#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
8958 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8959/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
8960#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8961 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8962/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
8963#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
8964 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
8965/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
8966 * stack. */
8967#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8968 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8969/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
8970#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
8971 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
8972
8973/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
8974#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
8975 iemFpuStackUnderflow(pIemCpu, a_iStDst)
8976/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8977 * stack. */
8978#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
8979 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
8980/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8981 * FPUDS. */
8982#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8983 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8984/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8985 * FPUDS. Pops stack. */
8986#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8987 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8988/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8989 * stack twice. */
8990#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
8991 iemFpuStackUnderflowThenPopPop(pIemCpu)
8992/** Raises a FPU stack underflow exception for an instruction pushing a result
8993 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
8994#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
8995 iemFpuStackPushUnderflow(pIemCpu)
8996/** Raises a FPU stack underflow exception for an instruction pushing a result
8997 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
8998#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
8999 iemFpuStackPushUnderflowTwo(pIemCpu)
9000
9001/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9002 * FPUIP, FPUCS and FOP. */
9003#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9004 iemFpuStackPushOverflow(pIemCpu)
9005/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9006 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9007#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9008 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9009/** Indicates that we (might) have modified the FPU state. */
9010#define IEM_MC_USED_FPU() \
9011 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9012
9013/**
9014 * Calls a MMX assembly implementation taking two visible arguments.
9015 *
9016 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9017 * @param a0 The first extra argument.
9018 * @param a1 The second extra argument.
9019 */
9020#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9021 do { \
9022 iemFpuPrepareUsage(pIemCpu); \
9023 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
9024 } while (0)
9025
9026/**
9027 * Calls a MMX assembly implementation taking three visible arguments.
9028 *
9029 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9030 * @param a0 The first extra argument.
9031 * @param a1 The second extra argument.
9032 * @param a2 The third extra argument.
9033 */
9034#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9035 do { \
9036 iemFpuPrepareUsage(pIemCpu); \
9037 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
9038 } while (0)
9039
9040
9041/**
9042 * Calls a SSE assembly implementation taking two visible arguments.
9043 *
9044 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9045 * @param a0 The first extra argument.
9046 * @param a1 The second extra argument.
9047 */
9048#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9049 do { \
9050 iemFpuPrepareUsageSse(pIemCpu); \
9051 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
9052 } while (0)
9053
9054/**
9055 * Calls a SSE assembly implementation taking three visible arguments.
9056 *
9057 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9058 * @param a0 The first extra argument.
9059 * @param a1 The second extra argument.
9060 * @param a2 The third extra argument.
9061 */
9062#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9063 do { \
9064 iemFpuPrepareUsageSse(pIemCpu); \
9065 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
9066 } while (0)
9067
9068
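/* Note: each IEM_MC_IF_XXX macro below opens a C block which the instruction
 * body must close again with IEM_MC_ELSE() / IEM_MC_ENDIF(). */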
9069/** @note Not for IOPL or IF testing. */
9070#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9071/** @note Not for IOPL or IF testing. */
9072#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9073/** @note Not for IOPL or IF testing. */
9074#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9075/** @note Not for IOPL or IF testing. */
9076#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9077/** @note Not for IOPL or IF testing. */
9078#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9079 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9080 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9081/** @note Not for IOPL or IF testing. */
9082#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9083 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9084 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9085/** @note Not for IOPL or IF testing. */
9086#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9087 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9088 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9089 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9090/** @note Not for IOPL or IF testing. */
9091#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9092 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9093 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9094 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9095#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9096#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9097#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9098/** @note Not for IOPL or IF testing. */
9099#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9100 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9101 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9102/** @note Not for IOPL or IF testing. */
9103#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9104 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9105 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9106/** @note Not for IOPL or IF testing. */
9107#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9108 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9109 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9110/** @note Not for IOPL or IF testing. */
9111#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9112 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9113 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9114/** @note Not for IOPL or IF testing. */
9115#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9116 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9117 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9118/** @note Not for IOPL or IF testing. */
9119#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9120 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9121 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9122#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9123#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9124#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9125 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9126#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9127 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9128#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9129 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9130#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9131 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9132#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9133 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9134#define IEM_MC_IF_FCW_IM() \
9135 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
9136
9137#define IEM_MC_ELSE() } else {
9138#define IEM_MC_ENDIF() } do {} while (0)
9139
9140/** @} */
9141
9142
9143/** @name Opcode Debug Helpers.
9144 * @{
9145 */
9146#ifdef DEBUG
9147# define IEMOP_MNEMONIC(a_szMnemonic) \
9148 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9149 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9150# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9151 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9152 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9153#else
9154# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9155# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9156#endif
9157
9158/** @} */
9159
9160
9161/** @name Opcode Helpers.
9162 * @{
9163 */
9164
9165/** The instruction raises an \#UD in real and V8086 mode. */
9166#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9167 do \
9168 { \
9169 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9170 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9171 } while (0)
9172
9173/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9174 * lock prefixed.
9175 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9176#define IEMOP_HLP_NO_LOCK_PREFIX() \
9177 do \
9178 { \
9179 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9180 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9181 } while (0)
9182
9183/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9184 * 64-bit mode. */
9185#define IEMOP_HLP_NO_64BIT() \
9186 do \
9187 { \
9188 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9189 return IEMOP_RAISE_INVALID_OPCODE(); \
9190 } while (0)
9191
9192/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9193 * 64-bit mode. */
9194#define IEMOP_HLP_ONLY_64BIT() \
9195 do \
9196 { \
9197 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9198 return IEMOP_RAISE_INVALID_OPCODE(); \
9199 } while (0)
9200
9201/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9202#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9203 do \
9204 { \
9205 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9206 iemRecalEffOpSize64Default(pIemCpu); \
9207 } while (0)
9208
9209/** The instruction has 64-bit operand size if 64-bit mode. */
9210#define IEMOP_HLP_64BIT_OP_SIZE() \
9211 do \
9212 { \
9213 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9214 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9215 } while (0)
9216
9217/** Only a REX prefix immediately preceding the first opcode byte takes
9218 * effect. This macro helps ensuring this as well as logging bad guest code. */
9219#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9220 do \
9221 { \
9222 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9223 { \
9224 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9225 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9226 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9227 pIemCpu->uRexB = 0; \
9228 pIemCpu->uRexIndex = 0; \
9229 pIemCpu->uRexReg = 0; \
9230 iemRecalEffOpSize(pIemCpu); \
9231 } \
9232 } while (0)
9233
9234/**
9235 * Done decoding.
9236 */
9237#define IEMOP_HLP_DONE_DECODING() \
9238 do \
9239 { \
9240 /*nothing for now, maybe later... */ \
9241 } while (0)
9242
9243/**
9244 * Done decoding, raise \#UD exception if lock prefix present.
9245 */
9246#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9247 do \
9248 { \
9249 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9250 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9251 } while (0)
9252#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9253 do \
9254 { \
9255 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9256 { \
9257 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9258 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9259 } \
9260 } while (0)
9261#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9262 do \
9263 { \
9264 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9265 { \
9266 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9267 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9268 } \
9269 } while (0)
9270
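/*
 * Illustrative sketch (disabled): shows the intended ordering of the helper
 * macros above inside an opcode decoder function.  The function name and
 * mnemonic below are hypothetical; the real decoder functions live in
 * IEMAllInstructions.cpp.h.  The idea is: log the mnemonic first, apply any
 * mode/operand-size adjustments, fetch the remaining opcode bytes, and only
 * then mark decoding as done (rejecting a stray LOCK prefix) before touching
 * guest state.
 */
#if 0
FNIEMOP_DEF(iemOp_example_Ev)
{
    IEMOP_MNEMONIC("example Ev");               /* decoding mnemonic logging */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();          /* e.g. near branches, push/pop */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);  /* fetch the ModR/M byte */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();   /* no LOCK allowed for this form */
    /* ... IEM_MC_* micro-code would go here ... */
    return IEMOP_RAISE_INVALID_OPCODE();        /* stub body */
}
#endif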
9271
9272/**
9273 * Calculates the effective address of a ModR/M memory operand.
9274 *
9275 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9276 *
9277 * @return Strict VBox status code.
9278 * @param pIemCpu The IEM per CPU data.
9279 * @param bRm The ModRM byte.
9280 * @param cbImm The size of any immediate following the
9281 * effective address opcode bytes. Important for
9282 * RIP relative addressing.
9283 * @param pGCPtrEff Where to return the effective address.
9284 */
9285static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9286{
9287 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9288 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9289#define SET_SS_DEF() \
9290 do \
9291 { \
9292 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9293 pIemCpu->iEffSeg = X86_SREG_SS; \
9294 } while (0)
9295
9296 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9297 {
9298/** @todo Check the effective address size crap! */
9299 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9300 {
9301 uint16_t u16EffAddr;
9302
9303 /* Handle the disp16 form with no registers first. */
9304 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9305 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9306 else
9307 {
9308 /* Get the displacement. */
9309 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9310 {
9311 case 0: u16EffAddr = 0; break;
9312 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9313 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9314 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9315 }
9316
9317 /* Add the base and index registers to the disp. */
9318 switch (bRm & X86_MODRM_RM_MASK)
9319 {
9320 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9321 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9322 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9323 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9324 case 4: u16EffAddr += pCtx->si; break;
9325 case 5: u16EffAddr += pCtx->di; break;
9326 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9327 case 7: u16EffAddr += pCtx->bx; break;
9328 }
9329 }
9330
9331 *pGCPtrEff = u16EffAddr;
9332 }
9333 else
9334 {
9335 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9336 uint32_t u32EffAddr;
9337
9338 /* Handle the disp32 form with no registers first. */
9339 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9340 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9341 else
9342 {
9343 /* Get the register (or SIB) value. */
9344 switch ((bRm & X86_MODRM_RM_MASK))
9345 {
9346 case 0: u32EffAddr = pCtx->eax; break;
9347 case 1: u32EffAddr = pCtx->ecx; break;
9348 case 2: u32EffAddr = pCtx->edx; break;
9349 case 3: u32EffAddr = pCtx->ebx; break;
9350 case 4: /* SIB */
9351 {
9352 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9353
9354 /* Get the index and scale it. */
9355 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9356 {
9357 case 0: u32EffAddr = pCtx->eax; break;
9358 case 1: u32EffAddr = pCtx->ecx; break;
9359 case 2: u32EffAddr = pCtx->edx; break;
9360 case 3: u32EffAddr = pCtx->ebx; break;
9361 case 4: u32EffAddr = 0; /*none */ break;
9362 case 5: u32EffAddr = pCtx->ebp; break;
9363 case 6: u32EffAddr = pCtx->esi; break;
9364 case 7: u32EffAddr = pCtx->edi; break;
9365 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9366 }
9367 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9368
9369 /* add base */
9370 switch (bSib & X86_SIB_BASE_MASK)
9371 {
9372 case 0: u32EffAddr += pCtx->eax; break;
9373 case 1: u32EffAddr += pCtx->ecx; break;
9374 case 2: u32EffAddr += pCtx->edx; break;
9375 case 3: u32EffAddr += pCtx->ebx; break;
9376 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9377 case 5:
9378 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9379 {
9380 u32EffAddr += pCtx->ebp;
9381 SET_SS_DEF();
9382 }
9383 else
9384 {
9385 uint32_t u32Disp;
9386 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9387 u32EffAddr += u32Disp;
9388 }
9389 break;
9390 case 6: u32EffAddr += pCtx->esi; break;
9391 case 7: u32EffAddr += pCtx->edi; break;
9392 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9393 }
9394 break;
9395 }
9396 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9397 case 6: u32EffAddr = pCtx->esi; break;
9398 case 7: u32EffAddr = pCtx->edi; break;
9399 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9400 }
9401
9402 /* Get and add the displacement. */
9403 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9404 {
9405 case 0:
9406 break;
9407 case 1:
9408 {
9409 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9410 u32EffAddr += i8Disp;
9411 break;
9412 }
9413 case 2:
9414 {
9415 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9416 u32EffAddr += u32Disp;
9417 break;
9418 }
9419 default:
9420 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9421 }
9422
9423 }
9424 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9425 *pGCPtrEff = u32EffAddr;
9426 else
9427 {
9428 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9429 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9430 }
9431 }
9432 }
9433 else
9434 {
9435 uint64_t u64EffAddr;
9436
9437 /* Handle the rip+disp32 form with no registers first. */
9438 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9439 {
9440 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9441 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9442 }
9443 else
9444 {
9445 /* Get the register (or SIB) value. */
9446 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9447 {
9448 case 0: u64EffAddr = pCtx->rax; break;
9449 case 1: u64EffAddr = pCtx->rcx; break;
9450 case 2: u64EffAddr = pCtx->rdx; break;
9451 case 3: u64EffAddr = pCtx->rbx; break;
9452 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9453 case 6: u64EffAddr = pCtx->rsi; break;
9454 case 7: u64EffAddr = pCtx->rdi; break;
9455 case 8: u64EffAddr = pCtx->r8; break;
9456 case 9: u64EffAddr = pCtx->r9; break;
9457 case 10: u64EffAddr = pCtx->r10; break;
9458 case 11: u64EffAddr = pCtx->r11; break;
9459 case 13: u64EffAddr = pCtx->r13; break;
9460 case 14: u64EffAddr = pCtx->r14; break;
9461 case 15: u64EffAddr = pCtx->r15; break;
9462 /* SIB */
9463 case 4:
9464 case 12:
9465 {
9466 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9467
9468 /* Get the index and scale it. */
9469 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9470 {
9471 case 0: u64EffAddr = pCtx->rax; break;
9472 case 1: u64EffAddr = pCtx->rcx; break;
9473 case 2: u64EffAddr = pCtx->rdx; break;
9474 case 3: u64EffAddr = pCtx->rbx; break;
9475 case 4: u64EffAddr = 0; /*none */ break;
9476 case 5: u64EffAddr = pCtx->rbp; break;
9477 case 6: u64EffAddr = pCtx->rsi; break;
9478 case 7: u64EffAddr = pCtx->rdi; break;
9479 case 8: u64EffAddr = pCtx->r8; break;
9480 case 9: u64EffAddr = pCtx->r9; break;
9481 case 10: u64EffAddr = pCtx->r10; break;
9482 case 11: u64EffAddr = pCtx->r11; break;
9483 case 12: u64EffAddr = pCtx->r12; break;
9484 case 13: u64EffAddr = pCtx->r13; break;
9485 case 14: u64EffAddr = pCtx->r14; break;
9486 case 15: u64EffAddr = pCtx->r15; break;
9487 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9488 }
9489 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9490
9491 /* add base */
9492 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9493 {
9494 case 0: u64EffAddr += pCtx->rax; break;
9495 case 1: u64EffAddr += pCtx->rcx; break;
9496 case 2: u64EffAddr += pCtx->rdx; break;
9497 case 3: u64EffAddr += pCtx->rbx; break;
9498 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9499 case 6: u64EffAddr += pCtx->rsi; break;
9500 case 7: u64EffAddr += pCtx->rdi; break;
9501 case 8: u64EffAddr += pCtx->r8; break;
9502 case 9: u64EffAddr += pCtx->r9; break;
9503 case 10: u64EffAddr += pCtx->r10; break;
9504 case 11: u64EffAddr += pCtx->r11; break;
9505 case 12: u64EffAddr += pCtx->r12; break;
9506 case 14: u64EffAddr += pCtx->r14; break;
9507 case 15: u64EffAddr += pCtx->r15; break;
9508 /* complicated encodings */
9509 case 5:
9510 case 13:
9511 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9512 {
9513 if (!pIemCpu->uRexB)
9514 {
9515 u64EffAddr += pCtx->rbp;
9516 SET_SS_DEF();
9517 }
9518 else
9519 u64EffAddr += pCtx->r13;
9520 }
9521 else
9522 {
9523 uint32_t u32Disp;
9524 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9525 u64EffAddr += (int32_t)u32Disp;
9526 }
9527 break;
9528 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9529 }
9530 break;
9531 }
9532 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9533 }
9534
9535 /* Get and add the displacement. */
9536 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9537 {
9538 case 0:
9539 break;
9540 case 1:
9541 {
9542 int8_t i8Disp;
9543 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9544 u64EffAddr += i8Disp;
9545 break;
9546 }
9547 case 2:
9548 {
9549 uint32_t u32Disp;
9550 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9551 u64EffAddr += (int32_t)u32Disp;
9552 break;
9553 }
9554 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9555 }
9556
9557 }
9558
9559 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9560 *pGCPtrEff = u64EffAddr;
9561 else
9562 {
9563 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9564 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9565 }
9566 }
9567
9568 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9569 return VINF_SUCCESS;
9570}
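
/*
 * Worked example (illustrative only): with a 16-bit effective address size,
 * bRm=0x42 decodes as mod=01, reg=000, rm=010, i.e. [bp+si+disp8].  With
 * bp=0x1000, si=0x0020 and a disp8 of 0x05 the function above returns
 * *pGCPtrEff=0x1025, and because BP is involved and no segment prefix is
 * present, SET_SS_DEF() makes SS the effective segment.
 */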
9571
9572/** @} */
9573
9574
9575
9576/*
9577 * Include the instructions
9578 */
9579#include "IEMAllInstructions.cpp.h"
9580
9581
9582
9583
9584#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9585
9586/**
9587 * Sets up execution verification mode.
9588 */
9589static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9590{
9591 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9592 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9593
9594 /*
9595 * Always note down the address of the current instruction.
9596 */
9597 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9598 pIemCpu->uOldRip = pOrgCtx->rip;
9599
9600 /*
9601 * Enable verification and/or logging.
9602 */
9603 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9604 if ( fNewNoRem
9605 && ( 0
9606#if 0 /* auto enable on first paged protected mode interrupt */
9607 || ( pOrgCtx->eflags.Bits.u1IF
9608 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9609 && TRPMHasTrap(pVCpu)
9610 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9611#endif
9612#if 0
9613 || ( pOrgCtx->cs.Sel == 0x10
9614 && ( pOrgCtx->rip == 0x90119e3e
9615 || pOrgCtx->rip == 0x901d9810))
9616#endif
9617#if 0 /* Auto enable DSL - FPU stuff. */
9618 || ( pOrgCtx->cs.Sel == 0x10
9619 && (// pOrgCtx->rip == 0xc02ec07f
9620 //|| pOrgCtx->rip == 0xc02ec082
9621 //|| pOrgCtx->rip == 0xc02ec0c9
9622 0
9623 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9624#endif
9625#if 0 /* Auto enable DSL - fstp st0 stuff. */
9626 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9627#endif
9628#if 0
9629 || pOrgCtx->rip == 0x9022bb3a
9630#endif
9631#if 0
9632 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9633#endif
9634#if 0
9635 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9636 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9637#endif
9638#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9639 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9640 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9641 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9642#endif
9643#if 0 /* NT4SP1 - xadd early boot. */
9644 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9645#endif
9646#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9647 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9648#endif
9649#if 0 /* NT4SP1 - cmpxchg (AMD). */
9650 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9651#endif
9652#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9653 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9654#endif
9655#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9656 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9657
9658#endif
9659#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9660 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9661
9662#endif
9663#if 0 /* NT4SP1 - frstor [ecx] */
9664 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9665#endif
9666#if 0 /* xxxxxx - All long mode code. */
9667 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9668#endif
9669#if 0 /* rep movsq linux 3.7 64-bit boot. */
9670 || (pOrgCtx->rip == 0x0000000000100241)
9671#endif
9672#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9673 || (pOrgCtx->rip == 0x000000000215e240)
9674#endif
9675#if 0 /* DOS's size-overridden iret to v8086. */
9676 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9677#endif
9678 )
9679 )
9680 {
9681 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9682 RTLogFlags(NULL, "enabled");
9683 fNewNoRem = false;
9684 }
9685 if (fNewNoRem != pIemCpu->fNoRem)
9686 {
9687 pIemCpu->fNoRem = fNewNoRem;
9688 if (!fNewNoRem)
9689 {
9690 LogAlways(("Enabling verification mode!\n"));
9691 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9692 }
9693 else
9694 LogAlways(("Disabling verification mode!\n"));
9695 }
9696
9697 /*
9698 * Switch state.
9699 */
9700 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9701 {
9702 static CPUMCTX s_DebugCtx; /* Ugly! */
9703
9704 s_DebugCtx = *pOrgCtx;
9705 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9706 }
9707
9708 /*
9709 * See if there is an interrupt pending in TRPM and inject it if we can.
9710 */
9711 pIemCpu->uInjectCpl = UINT8_MAX;
9712 if ( pOrgCtx->eflags.Bits.u1IF
9713 && TRPMHasTrap(pVCpu)
9714 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9715 {
9716 uint8_t u8TrapNo;
9717 TRPMEVENT enmType;
9718 RTGCUINT uErrCode;
9719 RTGCPTR uCr2;
9720 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9721 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9722 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9723 TRPMResetTrap(pVCpu);
9724 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9725 }
9726
9727 /*
9728 * Reset the counters.
9729 */
9730 pIemCpu->cIOReads = 0;
9731 pIemCpu->cIOWrites = 0;
9732 pIemCpu->fIgnoreRaxRdx = false;
9733 pIemCpu->fOverlappingMovs = false;
9734 pIemCpu->fProblematicMemory = false;
9735 pIemCpu->fUndefinedEFlags = 0;
9736
9737 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9738 {
9739 /*
9740 * Free all verification records.
9741 */
9742 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9743 pIemCpu->pIemEvtRecHead = NULL;
9744 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9745 do
9746 {
9747 while (pEvtRec)
9748 {
9749 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9750 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9751 pIemCpu->pFreeEvtRec = pEvtRec;
9752 pEvtRec = pNext;
9753 }
9754 pEvtRec = pIemCpu->pOtherEvtRecHead;
9755 pIemCpu->pOtherEvtRecHead = NULL;
9756 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9757 } while (pEvtRec);
9758 }
9759}
9760
9761
9762/**
9763 * Allocate an event record.
9764 * @returns Pointer to a record.
9765 */
9766static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9767{
9768 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9769 return NULL;
9770
9771 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9772 if (pEvtRec)
9773 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9774 else
9775 {
9776 if (!pIemCpu->ppIemEvtRecNext)
9777 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9778
9779 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9780 if (!pEvtRec)
9781 return NULL;
9782 }
9783 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9784 pEvtRec->pNext = NULL;
9785 return pEvtRec;
9786}
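
/*
 * Note (illustrative): verification records are recycled rather than freed.
 * iemExecVerificationModeSetup() pushes used records back onto pFreeEvtRec,
 * and the allocator above pops from that LIFO before falling back to
 * MMR3HeapAlloc.  Releasing a record is simply the inverse of the pop:
 *
 *     pEvtRec->pNext       = pIemCpu->pFreeEvtRec;
 *     pIemCpu->pFreeEvtRec = pEvtRec;
 */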
9787
9788
9789/**
9790 * IOMMMIORead notification.
9791 */
9792VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9793{
9794 PVMCPU pVCpu = VMMGetCpu(pVM);
9795 if (!pVCpu)
9796 return;
9797 PIEMCPU pIemCpu = &pVCpu->iem.s;
9798 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9799 if (!pEvtRec)
9800 return;
9801 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9802 pEvtRec->u.RamRead.GCPhys = GCPhys;
9803 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9804 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9805 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9806}
9807
9808
9809/**
9810 * IOMMMIOWrite notification.
9811 */
9812VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9813{
9814 PVMCPU pVCpu = VMMGetCpu(pVM);
9815 if (!pVCpu)
9816 return;
9817 PIEMCPU pIemCpu = &pVCpu->iem.s;
9818 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9819 if (!pEvtRec)
9820 return;
9821 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9822 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9823 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9824 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9825 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9826 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9827 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9828 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9829 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9830}
9831
9832
9833/**
9834 * IOMIOPortRead notification.
9835 */
9836VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9837{
9838 PVMCPU pVCpu = VMMGetCpu(pVM);
9839 if (!pVCpu)
9840 return;
9841 PIEMCPU pIemCpu = &pVCpu->iem.s;
9842 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9843 if (!pEvtRec)
9844 return;
9845 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9846 pEvtRec->u.IOPortRead.Port = Port;
9847 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9848 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9849 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9850}
9851
9852/**
9853 * IOMIOPortWrite notification.
9854 */
9855VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9856{
9857 PVMCPU pVCpu = VMMGetCpu(pVM);
9858 if (!pVCpu)
9859 return;
9860 PIEMCPU pIemCpu = &pVCpu->iem.s;
9861 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9862 if (!pEvtRec)
9863 return;
9864 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9865 pEvtRec->u.IOPortWrite.Port = Port;
9866 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9867 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9868 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9869 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9870}
9871
9872
9873VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
9874{
9875 AssertFailed();
9876}
9877
9878
9879VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
9880{
9881 AssertFailed();
9882}
9883
9884
9885/**
9886 * Fakes and records an I/O port read.
9887 *
9888 * @returns VINF_SUCCESS.
9889 * @param pIemCpu The IEM per CPU data.
9890 * @param Port The I/O port.
9891 * @param pu32Value Where to store the fake value.
9892 * @param cbValue The size of the access.
9893 */
9894static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
9895{
9896 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9897 if (pEvtRec)
9898 {
9899 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9900 pEvtRec->u.IOPortRead.Port = Port;
9901 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9902 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9903 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9904 }
9905 pIemCpu->cIOReads++;
9906 *pu32Value = 0xcccccccc;
9907 return VINF_SUCCESS;
9908}
9909
9910
9911/**
9912 * Fakes and records an I/O port write.
9913 *
9914 * @returns VINF_SUCCESS.
9915 * @param pIemCpu The IEM per CPU data.
9916 * @param Port The I/O port.
9917 * @param u32Value The value being written.
9918 * @param cbValue The size of the access.
9919 */
9920static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9921{
9922 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9923 if (pEvtRec)
9924 {
9925 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9926 pEvtRec->u.IOPortWrite.Port = Port;
9927 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9928 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9929 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9930 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9931 }
9932 pIemCpu->cIOWrites++;
9933 return VINF_SUCCESS;
9934}
9935
9936
9937/**
9938 * Used to add extra details about a stub case.
9939 * @param pIemCpu The IEM per CPU state.
9940 */
9941static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
9942{
9943 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9944 PVM pVM = IEMCPU_TO_VM(pIemCpu);
9945 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9946 char szRegs[4096];
9947 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
9948 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
9949 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
9950 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
9951 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
9952 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
9953 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
9954 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
9955 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
9956 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
9957 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
9958 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
9959 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
9960 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
9961 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
9962 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
9963 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
9964 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
9965 " efer=%016VR{efer}\n"
9966 " pat=%016VR{pat}\n"
9967 " sf_mask=%016VR{sf_mask}\n"
9968 "krnl_gs_base=%016VR{krnl_gs_base}\n"
9969 " lstar=%016VR{lstar}\n"
9970 " star=%016VR{star} cstar=%016VR{cstar}\n"
9971 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
9972 );
9973
9974 char szInstr1[256];
9975 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
9976 DBGF_DISAS_FLAGS_DEFAULT_MODE,
9977 szInstr1, sizeof(szInstr1), NULL);
9978 char szInstr2[256];
9979 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
9980 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9981 szInstr2, sizeof(szInstr2), NULL);
9982
9983 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
9984}
9985
9986
9987/**
9988 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
9989 * dump to the assertion info.
9990 *
9991 * @param pEvtRec The record to dump.
9992 */
9993static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
9994{
9995 switch (pEvtRec->enmEvent)
9996 {
9997 case IEMVERIFYEVENT_IOPORT_READ:
9998 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
9999 pEvtRec->u.IOPortRead.Port,
10000 pEvtRec->u.IOPortRead.cbValue);
10001 break;
10002 case IEMVERIFYEVENT_IOPORT_WRITE:
10003 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10004 pEvtRec->u.IOPortWrite.Port,
10005 pEvtRec->u.IOPortWrite.cbValue,
10006 pEvtRec->u.IOPortWrite.u32Value);
10007 break;
10008 case IEMVERIFYEVENT_RAM_READ:
10009 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10010 pEvtRec->u.RamRead.GCPhys,
10011 pEvtRec->u.RamRead.cb);
10012 break;
10013 case IEMVERIFYEVENT_RAM_WRITE:
10014 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10015 pEvtRec->u.RamWrite.GCPhys,
10016 pEvtRec->u.RamWrite.cb,
10017 (int)pEvtRec->u.RamWrite.cb,
10018 pEvtRec->u.RamWrite.ab);
10019 break;
10020 default:
10021 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10022 break;
10023 }
10024}
10025
10026
10027/**
10028 * Raises an assertion on the specified records, showing the given message
10029 * with dumps of both records attached.
10030 *
10031 * @param pIemCpu The IEM per CPU data.
10032 * @param pEvtRec1 The first record.
10033 * @param pEvtRec2 The second record.
10034 * @param pszMsg The message explaining why we're asserting.
10035 */
10036static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10037{
10038 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10039 iemVerifyAssertAddRecordDump(pEvtRec1);
10040 iemVerifyAssertAddRecordDump(pEvtRec2);
10041 iemVerifyAssertMsg2(pIemCpu);
10042 RTAssertPanic();
10043}
10044
10045
10046/**
10047 * Raises an assertion on the specified record, showing the given message with
10048 * a record dump attached.
10049 *
10050 * @param pIemCpu The IEM per CPU data.
10051 * @param pEvtRec The record.
10052 * @param pszMsg The message explaining why we're asserting.
10053 */
10054static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10055{
10056 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10057 iemVerifyAssertAddRecordDump(pEvtRec);
10058 iemVerifyAssertMsg2(pIemCpu);
10059 RTAssertPanic();
10060}
10061
10062
10063/**
10064 * Verifies a write record.
10065 *
10066 * @param pIemCpu The IEM per CPU data.
10067 * @param pEvtRec The write record.
10068 * @param fRem Set if REM did the other execution. If clear
10069 * it was HM.
10070 */
10071static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10072{
10073 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10074 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10075 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10076 if ( RT_FAILURE(rc)
10077 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10078 {
10079 /* fend off ins */
10080 if ( !pIemCpu->cIOReads
10081 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10082 || ( pEvtRec->u.RamWrite.cb != 1
10083 && pEvtRec->u.RamWrite.cb != 2
10084 && pEvtRec->u.RamWrite.cb != 4) )
10085 {
10086 /* fend off ROMs and MMIO */
10087 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10088 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10089 {
10090 /* fend off fxsave */
10091 if (pEvtRec->u.RamWrite.cb != 512)
10092 {
10093 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10094 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10095 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10096 RTAssertMsg2Add("%s: %.*Rhxs\n"
10097 "iem: %.*Rhxs\n",
10098 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10099 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10100 iemVerifyAssertAddRecordDump(pEvtRec);
10101 iemVerifyAssertMsg2(pIemCpu);
10102 RTAssertPanic();
10103 }
10104 }
10105 }
10106 }
10107
10108}
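
/*
 * Note (illustrative): the "fend off ROMs and MMIO" test above uses the usual
 * unsigned-subtraction range idiom: (x - base) > size is true exactly when x
 * lies outside [base, base + size], relying on unsigned wrap-around when
 * x < base.  With base 0x000a0000 and size 0x60000 it skips the legacy
 * VGA/BIOS window below 1 MB; the second term skips the 256 KB flash window
 * just below 4 GB.  A hypothetical helper spelling the idiom out:
 */
#if 0
DECLINLINE(bool) iemExampleIsOutsideRange(uint32_t uValue, uint32_t uBase, uint32_t cbRange)
{
    return uValue - uBase > cbRange; /* wraps to a huge value when uValue < uBase */
}
#endif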
10109
10110/**
10111 * Performs the post-execution verification checks.
10112 */
10113static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10114{
10115 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10116 return;
10117
10118 /*
10119 * Switch back the state.
10120 */
10121 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10122 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10123 Assert(pOrgCtx != pDebugCtx);
10124 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10125
10126 /*
10127 * Execute the instruction in REM.
10128 */
10129 bool fRem = false;
10130 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10131 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10132 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10133#ifdef IEM_VERIFICATION_MODE_FULL_HM
10134 if ( HMIsEnabled(pVM)
10135 && pIemCpu->cIOReads == 0
10136 && pIemCpu->cIOWrites == 0
10137 && !pIemCpu->fProblematicMemory)
10138 {
10139 unsigned iLoops = 0;
10140 do
10141 {
10142 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10143 iLoops++;
10144 } while ( rc == VINF_SUCCESS
10145 || ( rc == VINF_EM_DBG_STEPPED
10146 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10147 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10148 || ( pOrgCtx->rip != pDebugCtx->rip
10149 && pIemCpu->uInjectCpl != UINT8_MAX
10150 && iLoops < 8) );
10151 }
10152#endif
10153 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10154 || rc == VINF_IOM_R3_IOPORT_READ
10155 || rc == VINF_IOM_R3_IOPORT_WRITE
10156 || rc == VINF_IOM_R3_MMIO_READ
10157 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10158 || rc == VINF_IOM_R3_MMIO_WRITE
10159 )
10160 {
10161 EMRemLock(pVM);
10162 rc = REMR3EmulateInstruction(pVM, pVCpu);
10163 AssertRC(rc);
10164 EMRemUnlock(pVM);
10165 fRem = true;
10166 }
10167
10168 /*
10169 * Compare the register states.
10170 */
10171 unsigned cDiffs = 0;
10172 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10173 {
10174 //Log(("REM and IEM end up with different registers!\n"));
10175 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10176
10177# define CHECK_FIELD(a_Field) \
10178 do \
10179 { \
10180 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10181 { \
10182 switch (sizeof(pOrgCtx->a_Field)) \
10183 { \
10184 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10185 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10186 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10187 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10188 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10189 } \
10190 cDiffs++; \
10191 } \
10192 } while (0)
10193
10194# define CHECK_BIT_FIELD(a_Field) \
10195 do \
10196 { \
10197 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10198 { \
10199 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10200 cDiffs++; \
10201 } \
10202 } while (0)
10203
10204# define CHECK_SEL(a_Sel) \
10205 do \
10206 { \
10207 CHECK_FIELD(a_Sel.Sel); \
10208 CHECK_FIELD(a_Sel.Attr.u); \
10209 CHECK_FIELD(a_Sel.u64Base); \
10210 CHECK_FIELD(a_Sel.u32Limit); \
10211 CHECK_FIELD(a_Sel.fFlags); \
10212 } while (0)
10213
10214#if 1 /* The recompiler doesn't update these the intel way. */
10215 if (fRem)
10216 {
10217 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
10218 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
10219 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
10220 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
10221 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
10222 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
10223 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
10224 //pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK;
10225 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
10226 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
10227 }
10228#endif
10229 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
10230 {
10231 RTAssertMsg2Weak(" the FPU state differs\n");
10232 cDiffs++;
10233 CHECK_FIELD(fpu.FCW);
10234 CHECK_FIELD(fpu.FSW);
10235 CHECK_FIELD(fpu.FTW);
10236 CHECK_FIELD(fpu.FOP);
10237 CHECK_FIELD(fpu.FPUIP);
10238 CHECK_FIELD(fpu.CS);
10239 CHECK_FIELD(fpu.Rsrvd1);
10240 CHECK_FIELD(fpu.FPUDP);
10241 CHECK_FIELD(fpu.DS);
10242 CHECK_FIELD(fpu.Rsrvd2);
10243 CHECK_FIELD(fpu.MXCSR);
10244 CHECK_FIELD(fpu.MXCSR_MASK);
10245 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
10246 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
10247 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
10248 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
10249 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
10250 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
10251 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
10252 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
10253 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
10254 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
10255 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
10256 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
10257 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
10258 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
10259 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
10260 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
10261 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
10262 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
10263 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
10264 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
10265 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
10266 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
10267 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
10268 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
10269 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
10270 CHECK_FIELD(fpu.au32RsrvdRest[i]);
10271 }
10272 CHECK_FIELD(rip);
10273 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10274 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10275 {
10276 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10277 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10278 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10279 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10280 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10281 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10282 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10283 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10284 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10285 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10286 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10287 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10288 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10289 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10290 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10291 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10292 if (0 && !fRem) /** @todo debug the occasional cleared RF flag when running against VT-x. */
10293 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10294 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10295 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10296 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10297 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10298 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10299 }
10300
10301 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10302 CHECK_FIELD(rax);
10303 CHECK_FIELD(rcx);
10304 if (!pIemCpu->fIgnoreRaxRdx)
10305 CHECK_FIELD(rdx);
10306 CHECK_FIELD(rbx);
10307 CHECK_FIELD(rsp);
10308 CHECK_FIELD(rbp);
10309 CHECK_FIELD(rsi);
10310 CHECK_FIELD(rdi);
10311 CHECK_FIELD(r8);
10312 CHECK_FIELD(r9);
10313 CHECK_FIELD(r10);
10314 CHECK_FIELD(r11);
10315 CHECK_FIELD(r12);
10316 CHECK_FIELD(r13);
10317 CHECK_SEL(cs);
10318 CHECK_SEL(ss);
10319 CHECK_SEL(ds);
10320 CHECK_SEL(es);
10321 CHECK_SEL(fs);
10322 CHECK_SEL(gs);
10323 CHECK_FIELD(cr0);
10324
10325 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10326 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10327 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
10328 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10329 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10330 {
10331 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10332 { /* ignore */ }
10333 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10334 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10335 && fRem)
10336 { /* ignore */ }
10337 else
10338 CHECK_FIELD(cr2);
10339 }
10340 CHECK_FIELD(cr3);
10341 CHECK_FIELD(cr4);
10342 CHECK_FIELD(dr[0]);
10343 CHECK_FIELD(dr[1]);
10344 CHECK_FIELD(dr[2]);
10345 CHECK_FIELD(dr[3]);
10346 CHECK_FIELD(dr[6]);
10347 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10348 CHECK_FIELD(dr[7]);
10349 CHECK_FIELD(gdtr.cbGdt);
10350 CHECK_FIELD(gdtr.pGdt);
10351 CHECK_FIELD(idtr.cbIdt);
10352 CHECK_FIELD(idtr.pIdt);
10353 CHECK_SEL(ldtr);
10354 CHECK_SEL(tr);
10355 CHECK_FIELD(SysEnter.cs);
10356 CHECK_FIELD(SysEnter.eip);
10357 CHECK_FIELD(SysEnter.esp);
10358 CHECK_FIELD(msrEFER);
10359 CHECK_FIELD(msrSTAR);
10360 CHECK_FIELD(msrPAT);
10361 CHECK_FIELD(msrLSTAR);
10362 CHECK_FIELD(msrCSTAR);
10363 CHECK_FIELD(msrSFMASK);
10364 CHECK_FIELD(msrKERNELGSBASE);
10365
10366 if (cDiffs != 0)
10367 {
10368 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10369 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10370 iemVerifyAssertMsg2(pIemCpu);
10371 RTAssertPanic();
10372 }
10373# undef CHECK_FIELD
10374# undef CHECK_BIT_FIELD
10375 }
10376
10377 /*
10378 * If the register state compared fine, check the verification event
10379 * records.
10380 */
10381 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10382 {
10383 /*
10384 * Compare verification event records.
10385 * - I/O port accesses should be a 1:1 match.
10386 */
10387 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10388 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10389 while (pIemRec && pOtherRec)
10390 {
10391 /* Since we might miss RAM writes and reads in the other execution, skip
10392 extra IEM RAM records here, still verifying any skipped writes. */
10393 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10394 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10395 && pIemRec->pNext)
10396 {
10397 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10398 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10399 pIemRec = pIemRec->pNext;
10400 }
10401
10402 /* Do the compare. */
10403 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10404 {
10405 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10406 break;
10407 }
10408 bool fEquals;
10409 switch (pIemRec->enmEvent)
10410 {
10411 case IEMVERIFYEVENT_IOPORT_READ:
10412 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10413 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10414 break;
10415 case IEMVERIFYEVENT_IOPORT_WRITE:
10416 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10417 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10418 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10419 break;
10420 case IEMVERIFYEVENT_RAM_READ:
10421 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10422 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10423 break;
10424 case IEMVERIFYEVENT_RAM_WRITE:
10425 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10426 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10427 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10428 break;
10429 default:
10430 fEquals = false;
10431 break;
10432 }
10433 if (!fEquals)
10434 {
10435 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10436 break;
10437 }
10438
10439 /* advance */
10440 pIemRec = pIemRec->pNext;
10441 pOtherRec = pOtherRec->pNext;
10442 }
10443
10444 /* Ignore extra writes and reads. */
10445 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10446 {
10447 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10448 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10449 pIemRec = pIemRec->pNext;
10450 }
10451 if (pIemRec != NULL)
10452 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10453 else if (pOtherRec != NULL)
10454 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10455 }
10456 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10457}
10458
10459#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10460
10461/* stubs */
10462static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10463{
10464 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10465 return VERR_INTERNAL_ERROR;
10466}
10467
10468static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10469{
10470 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10471 return VERR_INTERNAL_ERROR;
10472}
10473
10474#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10475
10476
10477#ifdef LOG_ENABLED
10478/**
10479 * Logs the current instruction.
10480 * @param pVCpu The cross context virtual CPU structure of the caller.
10481 * @param pCtx The current CPU context.
10482 * @param fSameCtx Set if we have the same context information as the VMM,
10483 * clear if we may have already executed an instruction in
10484 * our debug context. When clear, we assume IEMCPU holds
10485 * valid CPU mode info.
10486 */
10487static void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10488{
10489# ifdef IN_RING3
10490 if (LogIs2Enabled())
10491 {
10492 char szInstr[256];
10493 uint32_t cbInstr = 0;
10494 if (fSameCtx)
10495 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10496 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10497 szInstr, sizeof(szInstr), &cbInstr);
10498 else
10499 {
10500 uint32_t fFlags = 0;
10501 switch (pVCpu->iem.s.enmCpuMode)
10502 {
10503 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10504 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10505 case IEMMODE_16BIT:
10506 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10507 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10508 else
10509 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10510 break;
10511 }
10512 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10513 szInstr, sizeof(szInstr), &cbInstr);
10514 }
10515
10516 Log2(("****\n"
10517 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10518 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10519 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10520 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10521 " %s\n"
10522 ,
10523 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10524 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10525 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10526 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10527 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
10528 szInstr));
10529
10530 if (LogIs3Enabled())
10531 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10532 }
10533 else
10534# endif
10535 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10536 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10537}
10538#endif
10539
10540
10541/**
10542 * Makes status code adjustments (pass-up from I/O and access handlers)
10543 * as well as maintaining statistics.
10544 *
10545 * @returns Strict VBox status code to pass up.
10546 * @param pIemCpu The IEM per CPU data.
10547 * @param rcStrict The status from executing an instruction.
10548 */
10549DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10550{
10551 if (rcStrict != VINF_SUCCESS)
10552 {
10553 if (RT_SUCCESS(rcStrict))
10554 {
10555 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10556 || rcStrict == VINF_IOM_R3_IOPORT_READ
10557 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10558 || rcStrict == VINF_IOM_R3_MMIO_READ
10559 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10560 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10561 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10562 int32_t const rcPassUp = pIemCpu->rcPassUp;
10563 if (rcPassUp == VINF_SUCCESS)
10564 pIemCpu->cRetInfStatuses++;
10565 else if ( rcPassUp < VINF_EM_FIRST
10566 || rcPassUp > VINF_EM_LAST
10567 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10568 {
10569 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10570 pIemCpu->cRetPassUpStatus++;
10571 rcStrict = rcPassUp;
10572 }
10573 else
10574 {
10575 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10576 pIemCpu->cRetInfStatuses++;
10577 }
10578 }
10579 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10580 pIemCpu->cRetAspectNotImplemented++;
10581 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10582 pIemCpu->cRetInstrNotImplemented++;
10583#ifdef IEM_VERIFICATION_MODE_FULL
10584 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10585 rcStrict = VINF_SUCCESS;
10586#endif
10587 else
10588 pIemCpu->cRetErrStatuses++;
10589 }
10590 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10591 {
10592 pIemCpu->cRetPassUpStatus++;
10593 rcStrict = pIemCpu->rcPassUp;
10594 }
10595
10596 return rcStrict;
10597}
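
/*
 * Example (illustrative): if an instruction completes with an informational
 * IOM status (say VINF_IOM_R3_IOPORT_READ) while a memory callback has stashed
 * a status in rcPassUp, the fiddling above lets the pass-up status win when it
 * lies outside the VINF_EM_FIRST..VINF_EM_LAST range, or when it is an EM
 * status that is numerically lower (i.e. higher priority) than the status the
 * instruction returned; otherwise the instruction status is kept.
 */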
10598
10599
10600/**
10601 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10602 * IEMExecOneWithPrefetchedByPC.
10603 *
10604 * @return Strict VBox status code.
10605 * @param pVCpu The current virtual CPU.
10606 * @param pIemCpu The IEM per CPU data.
10607 * @param fExecuteInhibit If set, execute the instruction following CLI,
10608 * POP SS and MOV SS,GR.
10609 */
10610DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10611{
10612 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10613 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10614 if (rcStrict == VINF_SUCCESS)
10615 pIemCpu->cInstructions++;
10616 if (pIemCpu->cActiveMappings > 0)
10617 iemMemRollback(pIemCpu);
10618//#ifdef DEBUG
10619// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10620//#endif
10621
10622 /* Execute the next instruction as well if a cli, pop ss or
10623 mov ss, Gr has just completed successfully. */
10624 if ( fExecuteInhibit
10625 && rcStrict == VINF_SUCCESS
10626 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10627 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10628 {
10629 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10630 if (rcStrict == VINF_SUCCESS)
10631 {
10632# ifdef LOG_ENABLED
10633 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10634# endif
10635 IEM_OPCODE_GET_NEXT_U8(&b);
10636 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10637 if (rcStrict == VINF_SUCCESS)
10638 pIemCpu->cInstructions++;
10639 if (pIemCpu->cActiveMappings > 0)
10640 iemMemRollback(pIemCpu);
10641 }
10642 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10643 }
10644
10645 /*
10646 * Return value fiddling, statistics and sanity assertions.
10647 */
10648 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10649
10650 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10651 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10652#if defined(IEM_VERIFICATION_MODE_FULL)
10653 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10654 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10655 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10656 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10657#endif
10658 return rcStrict;
10659}
10660
10661
10662#ifdef IN_RC
10663/**
10664 * Re-enters raw-mode or ensure we return to ring-3.
10665 * Re-enters raw-mode or ensures we return to ring-3.
10666 * @returns rcStrict, maybe modified.
10667 * @param pIemCpu The IEM CPU structure.
10668 * @param pVCpu The cross context virtual CPU structure of the caller.
10669 * @param pCtx The current CPU context.
10670 * @param rcStrict The status code returned by the interpreter.
10671 */
10672DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10673{
10674 if (!pIemCpu->fInPatchCode)
10675 CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx));
10676 return rcStrict;
10677}
10678#endif
10679
10680
10681/**
10682 * Execute one instruction.
10683 *
10684 * @return Strict VBox status code.
10685 * @param pVCpu The current virtual CPU.
10686 */
10687VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10688{
10689 PIEMCPU pIemCpu = &pVCpu->iem.s;
10690
10691#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10692 iemExecVerificationModeSetup(pIemCpu);
10693#endif
10694#ifdef LOG_ENABLED
10695 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10696 iemLogCurInstr(pVCpu, pCtx, true);
10697#endif
10698
10699 /*
10700 * Do the decoding and emulation.
10701 */
10702 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10703 if (rcStrict == VINF_SUCCESS)
10704 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10705
10706#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10707 /*
10708 * Assert some sanity.
10709 */
10710 iemExecVerificationModeCheck(pIemCpu);
10711#endif
10712#ifdef IN_RC
10713 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10714#endif
10715 if (rcStrict != VINF_SUCCESS)
10716 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10717 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10718 return rcStrict;
10719}
10720
10721
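/**
 * Executes one instruction like IEMExecOne, additionally reporting how many
 * bytes of guest memory the instruction wrote.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtxCore    The register frame; must match the current CPU context.
 * @param   pcbWritten  Where to return the number of bytes written to guest
 *                      memory by the instruction.  Optional.
 */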
10722VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10723{
10724 PIEMCPU pIemCpu = &pVCpu->iem.s;
10725 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10726 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10727
10728 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10729 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10730 if (rcStrict == VINF_SUCCESS)
10731 {
10732 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10733 if (pcbWritten)
10734 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10735 }
10736
10737#ifdef IN_RC
10738 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10739#endif
10740 return rcStrict;
10741}
10742
10743
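/**
 * Executes one instruction like IEMExecOne, but lets the caller supply already
 * fetched opcode bytes.
 *
 * The prefetched bytes are only used when the current RIP matches
 * OpcodeBytesPC; otherwise the opcodes are fetched the normal way.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu           The current virtual CPU.
 * @param   pCtxCore        The register frame; must match the current CPU context.
 * @param   OpcodeBytesPC   The guest RIP the opcode bytes were fetched from.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of prefetched opcode bytes.
 */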
10744VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10745 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10746{
10747 PIEMCPU pIemCpu = &pVCpu->iem.s;
10748 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10749 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10750
10751 VBOXSTRICTRC rcStrict;
10752 if ( cbOpcodeBytes
10753 && pCtx->rip == OpcodeBytesPC)
10754 {
10755 iemInitDecoder(pIemCpu, false);
10756 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10757 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10758 rcStrict = VINF_SUCCESS;
10759 }
10760 else
10761 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10762 if (rcStrict == VINF_SUCCESS)
10763 {
10764 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10765 }
10766
10767#ifdef IN_RC
10768 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10769#endif
10770 return rcStrict;
10771}
10772
10773
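/**
 * Executes one instruction like IEMExecOneEx, but bypasses access handlers and
 * skips the interrupt-inhibit follow-up instruction.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtxCore    The register frame; must match the current CPU context.
 * @param   pcbWritten  Where to return the number of bytes written to guest
 *                      memory by the instruction.  Optional.
 */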
10774VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10775{
10776 PIEMCPU pIemCpu = &pVCpu->iem.s;
10777 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10778 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10779
10780 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10781 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10782 if (rcStrict == VINF_SUCCESS)
10783 {
10784 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10785 if (pcbWritten)
10786 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10787 }
10788
10789#ifdef IN_RC
10790 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10791#endif
10792 return rcStrict;
10793}
10794
10795
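/**
 * Combines IEMExecOneBypassEx and IEMExecOneWithPrefetchedByPC: bypasses
 * access handlers and uses caller supplied opcode bytes when the current RIP
 * matches OpcodeBytesPC.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu           The current virtual CPU.
 * @param   pCtxCore        The register frame; must match the current CPU context.
 * @param   OpcodeBytesPC   The guest RIP the opcode bytes were fetched from.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of prefetched opcode bytes.
 */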
10796VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10797 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10798{
10799 PIEMCPU pIemCpu = &pVCpu->iem.s;
10800 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10801 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10802
10803 VBOXSTRICTRC rcStrict;
10804 if ( cbOpcodeBytes
10805 && pCtx->rip == OpcodeBytesPC)
10806 {
10807 iemInitDecoder(pIemCpu, true);
10808 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10809 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10810 rcStrict = VINF_SUCCESS;
10811 }
10812 else
10813 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10814 if (rcStrict == VINF_SUCCESS)
10815 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10816
10817#ifdef IN_RC
10818 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10819#endif
10820 return rcStrict;
10821}
10822
10823
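/**
 * Executes instructions from the current guest state, first injecting any
 * event pending in TRPM when interrupts can be delivered.
 *
 * Despite the name, the current implementation decodes and executes a single
 * instruction per call, just like IEMExecOne.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu   The current virtual CPU.
 */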
10824VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
10825{
10826 PIEMCPU pIemCpu = &pVCpu->iem.s;
10827
10828 /*
10829 * See if there is an interrupt pending in TRPM and inject it if we can.
10830 */
10831#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
10832 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10833# ifdef IEM_VERIFICATION_MODE_FULL
10834 pIemCpu->uInjectCpl = UINT8_MAX;
10835# endif
10836 if ( pCtx->eflags.Bits.u1IF
10837 && TRPMHasTrap(pVCpu)
10838 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
10839 {
10840 uint8_t u8TrapNo;
10841 TRPMEVENT enmType;
10842 RTGCUINT uErrCode;
10843 RTGCPTR uCr2;
10844 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
10845 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10846 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10847 TRPMResetTrap(pVCpu);
10848 }
10849#else
10850 iemExecVerificationModeSetup(pIemCpu);
10851 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10852#endif
10853
10854 /*
10855 * Log the state.
10856 */
10857#ifdef LOG_ENABLED
10858 iemLogCurInstr(pVCpu, pCtx, true);
10859#endif
10860
10861 /*
10862 * Do the decoding and emulation.
10863 */
10864 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10865 if (rcStrict == VINF_SUCCESS)
10866 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10867
10868#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10869 /*
10870 * Assert some sanity.
10871 */
10872 iemExecVerificationModeCheck(pIemCpu);
10873#endif
10874
10875 /*
10876 * Maybe re-enter raw-mode and log.
10877 */
10878#ifdef IN_RC
10879 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10880#endif
10881 if (rcStrict != VINF_SUCCESS)
10882 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10883 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10884 return rcStrict;
10885}
10886
10887
10888
10889/**
10890 * Injects a trap, fault, abort, software interrupt or external interrupt.
10891 *
10892 * The parameter list matches TRPMQueryTrapAll pretty closely.
10893 *
10894 * @returns Strict VBox status code.
10895 * @param pVCpu The current virtual CPU.
10896 * @param u8TrapNo The trap number.
10897 * @param enmType What type is it (trap/fault/abort), software
10898 * interrupt or hardware interrupt.
10899 * @param uErrCode The error code if applicable.
10900 * @param uCr2 The CR2 value if applicable.
10901 * @param cbInstr The instruction length (only relevant for
10902 * software interrupts).
10903 */
10904VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10905 uint8_t cbInstr)
10906{
10907 iemInitDecoder(&pVCpu->iem.s, false);
10908#ifdef DBGFTRACE_ENABLED
10909 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10910 u8TrapNo, enmType, uErrCode, uCr2);
10911#endif
10912
10913 uint32_t fFlags;
10914 switch (enmType)
10915 {
10916 case TRPM_HARDWARE_INT:
10917 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10918 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10919 uErrCode = uCr2 = 0;
10920 break;
10921
10922 case TRPM_SOFTWARE_INT:
10923 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10924 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10925 uErrCode = uCr2 = 0;
10926 break;
10927
10928 case TRPM_TRAP:
10929 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10930 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10931 if (u8TrapNo == X86_XCPT_PF)
10932 fFlags |= IEM_XCPT_FLAGS_CR2;
10933 switch (u8TrapNo)
10934 {
10935 case X86_XCPT_DF:
10936 case X86_XCPT_TS:
10937 case X86_XCPT_NP:
10938 case X86_XCPT_SS:
10939 case X86_XCPT_PF:
10940 case X86_XCPT_AC:
10941 fFlags |= IEM_XCPT_FLAGS_ERR;
10942 break;
10943
10944 case X86_XCPT_NMI:
10945 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
10946 break;
10947 }
10948 break;
10949
10950 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10951 }
10952
10953 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10954}
10955
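/*
 * Illustrative sketch: forwarding a pending external (hardware) interrupt
 * into the interpreter.  The vector 0x41 is an assumption for the example;
 * for TRPM_HARDWARE_INT the error code and CR2 arguments are zeroed by the
 * function itself, and cbInstr only matters for software interrupts.
 */
#if 0 /* hedged example only */
    VBOXSTRICTRC rcStrict2 = IEMInjectTrap(pVCpu, 0x41 /*u8TrapNo*/, TRPM_HARDWARE_INT,
                                           0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
#endif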
10956
10957/**
10958 * Injects the active TRPM event.
10959 *
10960 * @returns Strict VBox status code.
10961 * @param pVCpu Pointer to the VMCPU.
10962 */
10963VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
10964{
10965#ifndef IEM_IMPLEMENTS_TASKSWITCH
10966 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10967#else
10968 uint8_t u8TrapNo;
10969 TRPMEVENT enmType;
10970 RTGCUINT uErrCode;
10971 RTGCUINTPTR uCr2;
10972 uint8_t cbInstr;
10973 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
10974 if (RT_FAILURE(rc))
10975 return rc;
10976
10977 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10978
10979 /** @todo Are there any other codes that imply the event was successfully
10980 * delivered to the guest? See @bugref{6607}. */
10981 if ( rcStrict == VINF_SUCCESS
10982 || rcStrict == VINF_IEM_RAISED_XCPT)
10983 {
10984 TRPMResetTrap(pVCpu);
10985 }
10986 return rcStrict;
10987#endif
10988}
10989
10990
10991VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10992{
10993 return VERR_NOT_IMPLEMENTED;
10994}
10995
10996
10997VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10998{
10999 return VERR_NOT_IMPLEMENTED;
11000}
11001
11002
11003#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11004/**
11005 * Executes an IRET instruction with default operand size.
11006 *
11007 * This is for PATM.
11008 *
11009 * @returns VBox status code.
11010 * @param pVCpu The current virtual CPU.
11011 * @param pCtxCore The register frame.
11012 */
11013VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11014{
11015 PIEMCPU pIemCpu = &pVCpu->iem.s;
11016 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11017
11018 iemCtxCoreToCtx(pCtx, pCtxCore);
11019 iemInitDecoder(pIemCpu);
11020 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11021 if (rcStrict == VINF_SUCCESS)
11022 iemCtxToCtxCore(pCtxCore, pCtx);
11023 else
11024 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11025 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11026 return rcStrict;
11027}
11028#endif
11029
11030
11031
11032/**
11033 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11034 *
11035 * This API ASSUMES that the caller has already verified that the guest code is
11036 * allowed to access the I/O port. (The I/O port is in the DX register in the
11037 * guest state.)
11038 *
11039 * @returns Strict VBox status code.
11040 * @param pVCpu The cross context per virtual CPU structure.
11041 * @param cbValue The size of the I/O port access (1, 2, or 4).
11042 * @param enmAddrMode The addressing mode.
11043 * @param fRepPrefix Indicates whether a repeat prefix is used
11044 * (doesn't matter which for this instruction).
11045 * @param cbInstr The instruction length in bytes.
11046 * @param iEffSeg The effective segment register.
11047 */
11048VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11049 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11050{
11051 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11052 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
11053
11054 /*
11055 * State init.
11056 */
11057 PIEMCPU pIemCpu = &pVCpu->iem.s;
11058 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11059
11060 /*
11061 * Switch orgy for getting to the right handler.
11062 */
11063 VBOXSTRICTRC rcStrict;
11064 if (fRepPrefix)
11065 {
11066 switch (enmAddrMode)
11067 {
11068 case IEMMODE_16BIT:
11069 switch (cbValue)
11070 {
11071 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11072 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11073 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11074 default:
11075 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11076 }
11077 break;
11078
11079 case IEMMODE_32BIT:
11080 switch (cbValue)
11081 {
11082 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11083 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11084 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11085 default:
11086 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11087 }
11088 break;
11089
11090 case IEMMODE_64BIT:
11091 switch (cbValue)
11092 {
11093 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11094 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11095 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11096 default:
11097 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11098 }
11099 break;
11100
11101 default:
11102 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11103 }
11104 }
11105 else
11106 {
11107 switch (enmAddrMode)
11108 {
11109 case IEMMODE_16BIT:
11110 switch (cbValue)
11111 {
11112 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11113 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11114 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11115 default:
11116 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11117 }
11118 break;
11119
11120 case IEMMODE_32BIT:
11121 switch (cbValue)
11122 {
11123 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11124 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11125 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11126 default:
11127 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11128 }
11129 break;
11130
11131 case IEMMODE_64BIT:
11132 switch (cbValue)
11133 {
11134 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11135 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11136 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11137 default:
11138 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11139 }
11140 break;
11141
11142 default:
11143 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11144 }
11145 }
11146
11147 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11148}
11149
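/*
 * Illustrative sketch: handing a 'rep outsb' (byte accesses, 32-bit
 * addressing, DS as the effective segment) to IEMExecStringIoWrite after the
 * caller has validated I/O permissions.  The instruction length of 2 bytes
 * (F3 6E) is an assumption for an unadorned 'rep outsb' encoding.
 */
#if 0 /* hedged example only */
    VBOXSTRICTRC rcStrict2 = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                                                  true /*fRepPrefix*/, 2 /*cbInstr*/,
                                                  X86_SREG_DS /*iEffSeg*/);
#endif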
11150
11151/**
11152 * Interface for HM and EM for executing string I/O IN (read) instructions.
11153 *
11154 * This API ASSUMES that the caller has already verified that the guest code is
11155 * allowed to access the I/O port. (The I/O port is in the DX register in the
11156 * guest state.)
11157 *
11158 * @returns Strict VBox status code.
11159 * @param pVCpu The cross context per virtual CPU structure.
11160 * @param cbValue The size of the I/O port access (1, 2, or 4).
11161 * @param enmAddrMode The addressing mode.
11162 * @param fRepPrefix Indicates whether a repeat prefix is used
11163 * (doesn't matter which for this instruction).
11164 * @param cbInstr The instruction length in bytes.
11165 */
11166VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11167 bool fRepPrefix, uint8_t cbInstr)
11168{
11169 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
11170
11171 /*
11172 * State init.
11173 */
11174 PIEMCPU pIemCpu = &pVCpu->iem.s;
11175 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11176
11177 /*
11178 * Switch orgy for getting to the right handler.
11179 */
11180 VBOXSTRICTRC rcStrict;
11181 if (fRepPrefix)
11182 {
11183 switch (enmAddrMode)
11184 {
11185 case IEMMODE_16BIT:
11186 switch (cbValue)
11187 {
11188 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11189 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11190 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11191 default:
11192 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11193 }
11194 break;
11195
11196 case IEMMODE_32BIT:
11197 switch (cbValue)
11198 {
11199 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11200 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11201 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11202 default:
11203 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11204 }
11205 break;
11206
11207 case IEMMODE_64BIT:
11208 switch (cbValue)
11209 {
11210 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11211 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11212 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11213 default:
11214 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11215 }
11216 break;
11217
11218 default:
11219 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11220 }
11221 }
11222 else
11223 {
11224 switch (enmAddrMode)
11225 {
11226 case IEMMODE_16BIT:
11227 switch (cbValue)
11228 {
11229 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11230 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11231 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11232 default:
11233 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11234 }
11235 break;
11236
11237 case IEMMODE_32BIT:
11238 switch (cbValue)
11239 {
11240 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11241 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11242 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11243 default:
11244 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11245 }
11246 break;
11247
11248 case IEMMODE_64BIT:
11249 switch (cbValue)
11250 {
11251 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11252 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11253 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11254 default:
11255 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11256 }
11257 break;
11258
11259 default:
11260 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11261 }
11262 }
11263
11264 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11265}
11266
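/*
 * Illustrative sketch: a non-repeated 'insw' (word accesses, 16-bit
 * addressing) forwarded to IEMExecStringIoRead.  The single-byte length
 * (opcode 6D) is an assumption for a prefix-free 16-bit encoding; INS
 * always targets ES:(E)DI, so no effective segment is passed.
 */
#if 0 /* hedged example only */
    VBOXSTRICTRC rcStrict2 = IEMExecStringIoRead(pVCpu, 2 /*cbValue*/, IEMMODE_16BIT,
                                                 false /*fRepPrefix*/, 1 /*cbInstr*/);
#endif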