VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@51434

Last change on this file since 51434 was 51434, checked in by vboxsync on 2014-05-28

VMM/IEM: nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 418.7 KB
1/* $Id: IEMAll.cpp 51434 2014-05-28 08:14:55Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler was considered; however, it is thought to
36 * conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
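/* Editor's note - illustrative sketch, not part of the original file: how the
   logging level conventions listed above typically map onto actual statements
   in IEM code.  The context variables (pCtx, bRm) are assumed to be in scope
   and the mnemonic is made up. */
#if 0
    LogFlow(("IEMExecOne: enter cs:rip=%04x:%RGv\n", pCtx->cs.Sel, pCtx->rip)); /* Flow: enter/exit state info. */
    Log4(("decode - %04x:%RGv: add eax, ebx\n", pCtx->cs.Sel, pCtx->rip));      /* Level 4: mnemonic w/ EIP. */
    Log5(("ModR/M byte: %#x\n", bRm));                                          /* Level 5: decoding details. */
    Log(("IEMExecOne: raising #GP(0)\n"));                                      /* Level 1: exceptions and other major events. */
#endif
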
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78//#define IEM_IMPLEMENTS_TASKSWITCH
79
80/*******************************************************************************
81* Header Files *
82*******************************************************************************/
83#define LOG_GROUP LOG_GROUP_IEM
84#include <VBox/vmm/iem.h>
85#include <VBox/vmm/cpum.h>
86#include <VBox/vmm/pdm.h>
87#include <VBox/vmm/pgm.h>
88#include <internal/pgm.h>
89#include <VBox/vmm/iom.h>
90#include <VBox/vmm/em.h>
91#include <VBox/vmm/hm.h>
92#include <VBox/vmm/tm.h>
93#include <VBox/vmm/dbgf.h>
94#include <VBox/vmm/dbgftrace.h>
95#ifdef VBOX_WITH_RAW_MODE_NOT_R0
96# include <VBox/vmm/patm.h>
97# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
98# include <VBox/vmm/csam.h>
99# endif
100#endif
101#include "IEMInternal.h"
102#ifdef IEM_VERIFICATION_MODE_FULL
103# include <VBox/vmm/rem.h>
104# include <VBox/vmm/mm.h>
105#endif
106#include <VBox/vmm/vm.h>
107#include <VBox/log.h>
108#include <VBox/err.h>
109#include <VBox/param.h>
110#include <VBox/dis.h>
111#include <VBox/disopcode.h>
112#include <iprt/assert.h>
113#include <iprt/string.h>
114#include <iprt/x86.h>
115
116
117
118/*******************************************************************************
119* Structures and Typedefs *
120*******************************************************************************/
121/** @typedef PFNIEMOP
122 * Pointer to an opcode decoder function.
123 */
124
125/** @def FNIEMOP_DEF
126 * Define an opcode decoder function.
127 *
128 * We're using macros for this so that adding and removing parameters as well as
129 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
130 *
131 * @param a_Name The function name.
132 */
133
134
135#if defined(__GNUC__) && defined(RT_ARCH_X86)
136typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
137# define FNIEMOP_DEF(a_Name) \
138 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
139# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
140 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
141# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
142 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
143
144#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
145typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
146# define FNIEMOP_DEF(a_Name) \
147 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
148# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
149 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
150# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
151 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
152
153#elif defined(__GNUC__)
154typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
155# define FNIEMOP_DEF(a_Name) \
156 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#else
163typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
164# define FNIEMOP_DEF(a_Name) \
165 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
170
171#endif
172
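/* Editor's note - illustrative sketch, not part of the original file: what an
   opcode decoder defined via FNIEMOP_DEF typically looks like.  The mnemonic
   helper (IEMOP_MNEMONIC), the deferral macro (IEM_MC_DEFER_TO_CIMPL_0) and the
   C implementation (iemCImpl_hlt) are assumed from other parts of IEM. */
#if 0
FNIEMOP_DEF(iemOp_hlt)
{
    IEMOP_MNEMONIC("hlt");                        /* log the mnemonic at level 4 */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt); /* hand the real work to the C implementation */
}
#endif
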
173
174/**
175 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
176 */
177typedef union IEMSELDESC
178{
179 /** The legacy view. */
180 X86DESC Legacy;
181 /** The long mode view. */
182 X86DESC64 Long;
183} IEMSELDESC;
184/** Pointer to a selector descriptor table entry. */
185typedef IEMSELDESC *PIEMSELDESC;
186
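/* Editor's note - illustrative sketch, not part of the original file: how a
   descriptor fetched into an IEMSELDESC is typically inspected through its two
   views.  iemMemFetchSelDesc is forward declared further down; uSel and the
   surrounding error handling are assumed from the caller. */
#if 0
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!Desc.Legacy.Gen.u1DescType)       /* legacy view: system vs. code/data descriptor */
        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
    if (Desc.Long.Gen.u1Long)              /* long mode view: 64-bit code segment */
    { /* ... */ }
#endif
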
187
188/*******************************************************************************
189* Defined Constants And Macros *
190*******************************************************************************/
191/** @name IEM status codes.
192 *
193 * Not quite sure how this will play out in the end, just aliasing safe status
194 * codes for now.
195 *
196 * @{ */
197#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
198/** @} */
199
200/** Temporary hack to disable the double execution. Will be removed in favor
201 * of a dedicated execution mode in EM. */
202//#define IEM_VERIFICATION_MODE_NO_REM
203
204/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
205 * due to GCC lacking knowledge about the value range of a switch. */
206#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
207
208/**
209 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
210 * occasion.
211 */
212#ifdef LOG_ENABLED
213# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
214 do { \
215 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
216 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
217 } while (0)
218#else
219# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
220 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
221#endif
222
223/**
224 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
225 * occasion using the supplied logger statement.
226 *
227 * @param a_LoggerArgs What to log on failure.
228 */
229#ifdef LOG_ENABLED
230# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
231 do { \
232 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
233 /*LogFunc(a_LoggerArgs);*/ \
234 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
235 } while (0)
236#else
237# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
238 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
239#endif
240
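/* Editor's note - illustrative sketch, not part of the original file: typical
   use of the two macros above inside a decoder stub for an aspect IEM does not
   implement yet.  The opcode name, the condition and the logged values are
   made up for the example. */
#if 0
FNIEMOP_DEF(iemOp_Grp9_vmptrld)
{
    if (some_unimplemented_case)
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("vmptrld: mode=%d\n", pIemCpu->enmCpuMode));
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();   /* logs function + line in debug builds */
}
#endif
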
241/**
242 * Call an opcode decoder function.
243 *
244 * We're using macros for this so that adding and removing parameters can be
245 * done as we please. See FNIEMOP_DEF.
246 */
247#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
248
249/**
250 * Call a common opcode decoder function taking one extra argument.
251 *
252 * We're using macros for this so that adding and removing parameters can be
253 * done as we please. See FNIEMOP_DEF_1.
254 */
255#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
256
257/**
258 * Call a common opcode decoder function taking two extra arguments.
259 *
260 * We're using macros for this so that adding and removing parameters can be
261 * done as we please. See FNIEMOP_DEF_2.
262 */
263#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
264
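/* Editor's note - illustrative sketch, not part of the original file: how the
   call macros above are typically used to dispatch into the one-byte opcode
   map (g_apfnOneByteMap, declared further down).  The fetch macro used here is
   also defined later in this file. */
#if 0
    uint8_t b;
    IEM_OPCODE_GET_NEXT_U8(&b);
    VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
#endif
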
265/**
266 * Check if we're currently executing in real or virtual 8086 mode.
267 *
268 * @returns @c true if it is, @c false if not.
269 * @param a_pIemCpu The IEM state of the current CPU.
270 */
271#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
272
273/**
274 * Check if we're currently executing in virtual 8086 mode.
275 *
276 * @returns @c true if it is, @c false if not.
277 * @param a_pIemCpu The IEM state of the current CPU.
278 */
279#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
280
281/**
282 * Check if we're currently executing in long mode.
283 *
284 * @returns @c true if it is, @c false if not.
285 * @param a_pIemCpu The IEM state of the current CPU.
286 */
287#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
288
289/**
290 * Check if we're currently executing in real mode.
291 *
292 * @returns @c true if it is, @c false if not.
293 * @param a_pIemCpu The IEM state of the current CPU.
294 */
295#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
296
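/* Editor's note - illustrative sketch, not part of the original file: the mode
   check macros above are typically used to guard protected-mode-only handling.
   The choice of exception here is just an example. */
#if 0
    if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))  /* e.g. LAR/LSL style instructions */
        return iemRaiseUndefinedOpcode(pIemCpu);
    if (IEM_IS_LONG_MODE(pIemCpu))
    { /* 64-bit specific handling */ }
#endif
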
297/**
298 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
299 */
300#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
301
302/**
303 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
304 */
305#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
306
307/**
308 * Tests if at least one of the specified AMD CPUID features (extended) is
309 * marked present.
310 */
311#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
312
313/**
314 * Checks if an Intel CPUID feature is present.
315 */
316#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
317 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
318 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
319
320/**
321 * Checks if an Intel CPUID feature is present.
322 */
323#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
324 ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) )
325
326/**
327 * Checks if an Intel CPUID feature is present in the host CPU.
328 */
329#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
330 ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
331
332/**
333 * Evaluates to true if we're presenting an Intel CPU to the guest.
334 */
335#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
336
337/**
338 * Evaluates to true if we're presenting an AMD CPU to the guest.
339 */
340#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
341
342/**
343 * Check if the address is canonical.
344 */
345#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
346
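/* Editor's note - illustrative sketch, not part of the original file: typical
   use of the CPUID feature and vendor macros above when deciding whether an
   instruction is available to the guest.  The SSE2 gate is just an example. */
#if 0
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return iemRaiseUndefinedOpcode(pIemCpu);
    if (IEM_IS_GUEST_CPU_AMD(pIemCpu))
    { /* vendor specific flag/behaviour differences, if any */ }
#endif
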
347
348/*******************************************************************************
349* Global Variables *
350*******************************************************************************/
351extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
352
353
354/** Function table for the ADD instruction. */
355static const IEMOPBINSIZES g_iemAImpl_add =
356{
357 iemAImpl_add_u8, iemAImpl_add_u8_locked,
358 iemAImpl_add_u16, iemAImpl_add_u16_locked,
359 iemAImpl_add_u32, iemAImpl_add_u32_locked,
360 iemAImpl_add_u64, iemAImpl_add_u64_locked
361};
362
363/** Function table for the ADC instruction. */
364static const IEMOPBINSIZES g_iemAImpl_adc =
365{
366 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
367 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
368 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
369 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
370};
371
372/** Function table for the SUB instruction. */
373static const IEMOPBINSIZES g_iemAImpl_sub =
374{
375 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
376 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
377 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
378 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
379};
380
381/** Function table for the SBB instruction. */
382static const IEMOPBINSIZES g_iemAImpl_sbb =
383{
384 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
385 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
386 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
387 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
388};
389
390/** Function table for the OR instruction. */
391static const IEMOPBINSIZES g_iemAImpl_or =
392{
393 iemAImpl_or_u8, iemAImpl_or_u8_locked,
394 iemAImpl_or_u16, iemAImpl_or_u16_locked,
395 iemAImpl_or_u32, iemAImpl_or_u32_locked,
396 iemAImpl_or_u64, iemAImpl_or_u64_locked
397};
398
399/** Function table for the XOR instruction. */
400static const IEMOPBINSIZES g_iemAImpl_xor =
401{
402 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
403 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
404 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
405 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
406};
407
408/** Function table for the AND instruction. */
409static const IEMOPBINSIZES g_iemAImpl_and =
410{
411 iemAImpl_and_u8, iemAImpl_and_u8_locked,
412 iemAImpl_and_u16, iemAImpl_and_u16_locked,
413 iemAImpl_and_u32, iemAImpl_and_u32_locked,
414 iemAImpl_and_u64, iemAImpl_and_u64_locked
415};
416
417/** Function table for the CMP instruction.
418 * @remarks Making operand order ASSUMPTIONS.
419 */
420static const IEMOPBINSIZES g_iemAImpl_cmp =
421{
422 iemAImpl_cmp_u8, NULL,
423 iemAImpl_cmp_u16, NULL,
424 iemAImpl_cmp_u32, NULL,
425 iemAImpl_cmp_u64, NULL
426};
427
428/** Function table for the TEST instruction.
429 * @remarks Making operand order ASSUMPTIONS.
430 */
431static const IEMOPBINSIZES g_iemAImpl_test =
432{
433 iemAImpl_test_u8, NULL,
434 iemAImpl_test_u16, NULL,
435 iemAImpl_test_u32, NULL,
436 iemAImpl_test_u64, NULL
437};
438
439/** Function table for the BT instruction. */
440static const IEMOPBINSIZES g_iemAImpl_bt =
441{
442 NULL, NULL,
443 iemAImpl_bt_u16, NULL,
444 iemAImpl_bt_u32, NULL,
445 iemAImpl_bt_u64, NULL
446};
447
448/** Function table for the BTC instruction. */
449static const IEMOPBINSIZES g_iemAImpl_btc =
450{
451 NULL, NULL,
452 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
453 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
454 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
455};
456
457/** Function table for the BTR instruction. */
458static const IEMOPBINSIZES g_iemAImpl_btr =
459{
460 NULL, NULL,
461 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
462 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
463 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
464};
465
466/** Function table for the BTS instruction. */
467static const IEMOPBINSIZES g_iemAImpl_bts =
468{
469 NULL, NULL,
470 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
471 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
472 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
473};
474
475/** Function table for the BSF instruction. */
476static const IEMOPBINSIZES g_iemAImpl_bsf =
477{
478 NULL, NULL,
479 iemAImpl_bsf_u16, NULL,
480 iemAImpl_bsf_u32, NULL,
481 iemAImpl_bsf_u64, NULL
482};
483
484/** Function table for the BSR instruction. */
485static const IEMOPBINSIZES g_iemAImpl_bsr =
486{
487 NULL, NULL,
488 iemAImpl_bsr_u16, NULL,
489 iemAImpl_bsr_u32, NULL,
490 iemAImpl_bsr_u64, NULL
491};
492
493/** Function table for the IMUL instruction. */
494static const IEMOPBINSIZES g_iemAImpl_imul_two =
495{
496 NULL, NULL,
497 iemAImpl_imul_two_u16, NULL,
498 iemAImpl_imul_two_u32, NULL,
499 iemAImpl_imul_two_u64, NULL
500};
501
502/** Group 1 /r lookup table. */
503static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
504{
505 &g_iemAImpl_add,
506 &g_iemAImpl_or,
507 &g_iemAImpl_adc,
508 &g_iemAImpl_sbb,
509 &g_iemAImpl_and,
510 &g_iemAImpl_sub,
511 &g_iemAImpl_xor,
512 &g_iemAImpl_cmp
513};
514
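/* Editor's note - illustrative sketch, not part of the original file: how the
   group-1 table above is typically consumed by the 0x80..0x83 decoders - the
   reg field of the ModR/M byte selects ADD/OR/ADC/SBB/AND/SUB/XOR/CMP.  The
   fetch macro is defined later in this file; the member names are assumptions
   from IEMInternal.h. */
#if 0
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
    /* pImpl->pfnNormalU8 / pImpl->pfnLockedU8 etc. then perform the operation. */
#endif
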
515/** Function table for the INC instruction. */
516static const IEMOPUNARYSIZES g_iemAImpl_inc =
517{
518 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
519 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
520 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
521 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
522};
523
524/** Function table for the DEC instruction. */
525static const IEMOPUNARYSIZES g_iemAImpl_dec =
526{
527 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
528 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
529 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
530 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
531};
532
533/** Function table for the NEG instruction. */
534static const IEMOPUNARYSIZES g_iemAImpl_neg =
535{
536 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
537 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
538 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
539 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
540};
541
542/** Function table for the NOT instruction. */
543static const IEMOPUNARYSIZES g_iemAImpl_not =
544{
545 iemAImpl_not_u8, iemAImpl_not_u8_locked,
546 iemAImpl_not_u16, iemAImpl_not_u16_locked,
547 iemAImpl_not_u32, iemAImpl_not_u32_locked,
548 iemAImpl_not_u64, iemAImpl_not_u64_locked
549};
550
551
552/** Function table for the ROL instruction. */
553static const IEMOPSHIFTSIZES g_iemAImpl_rol =
554{
555 iemAImpl_rol_u8,
556 iemAImpl_rol_u16,
557 iemAImpl_rol_u32,
558 iemAImpl_rol_u64
559};
560
561/** Function table for the ROR instruction. */
562static const IEMOPSHIFTSIZES g_iemAImpl_ror =
563{
564 iemAImpl_ror_u8,
565 iemAImpl_ror_u16,
566 iemAImpl_ror_u32,
567 iemAImpl_ror_u64
568};
569
570/** Function table for the RCL instruction. */
571static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
572{
573 iemAImpl_rcl_u8,
574 iemAImpl_rcl_u16,
575 iemAImpl_rcl_u32,
576 iemAImpl_rcl_u64
577};
578
579/** Function table for the RCR instruction. */
580static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
581{
582 iemAImpl_rcr_u8,
583 iemAImpl_rcr_u16,
584 iemAImpl_rcr_u32,
585 iemAImpl_rcr_u64
586};
587
588/** Function table for the SHL instruction. */
589static const IEMOPSHIFTSIZES g_iemAImpl_shl =
590{
591 iemAImpl_shl_u8,
592 iemAImpl_shl_u16,
593 iemAImpl_shl_u32,
594 iemAImpl_shl_u64
595};
596
597/** Function table for the SHR instruction. */
598static const IEMOPSHIFTSIZES g_iemAImpl_shr =
599{
600 iemAImpl_shr_u8,
601 iemAImpl_shr_u16,
602 iemAImpl_shr_u32,
603 iemAImpl_shr_u64
604};
605
606/** Function table for the SAR instruction. */
607static const IEMOPSHIFTSIZES g_iemAImpl_sar =
608{
609 iemAImpl_sar_u8,
610 iemAImpl_sar_u16,
611 iemAImpl_sar_u32,
612 iemAImpl_sar_u64
613};
614
615
616/** Function table for the MUL instruction. */
617static const IEMOPMULDIVSIZES g_iemAImpl_mul =
618{
619 iemAImpl_mul_u8,
620 iemAImpl_mul_u16,
621 iemAImpl_mul_u32,
622 iemAImpl_mul_u64
623};
624
625/** Function table for the IMUL instruction working implicitly on rAX. */
626static const IEMOPMULDIVSIZES g_iemAImpl_imul =
627{
628 iemAImpl_imul_u8,
629 iemAImpl_imul_u16,
630 iemAImpl_imul_u32,
631 iemAImpl_imul_u64
632};
633
634/** Function table for the DIV instruction. */
635static const IEMOPMULDIVSIZES g_iemAImpl_div =
636{
637 iemAImpl_div_u8,
638 iemAImpl_div_u16,
639 iemAImpl_div_u32,
640 iemAImpl_div_u64
641};
642
643/** Function table for the IDIV instruction. */
644static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
645{
646 iemAImpl_idiv_u8,
647 iemAImpl_idiv_u16,
648 iemAImpl_idiv_u32,
649 iemAImpl_idiv_u64
650};
651
652/** Function table for the SHLD instruction */
653static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
654{
655 iemAImpl_shld_u16,
656 iemAImpl_shld_u32,
657 iemAImpl_shld_u64,
658};
659
660/** Function table for the SHRD instruction */
661static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
662{
663 iemAImpl_shrd_u16,
664 iemAImpl_shrd_u32,
665 iemAImpl_shrd_u64,
666};
667
668
669/** Function table for the PUNPCKLBW instruction */
670static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
671/** Function table for the PUNPCKLWD instruction */
672static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
673/** Function table for the PUNPCKLDQ instruction */
674static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
675/** Function table for the PUNPCKLQDQ instruction */
676static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
677
678/** Function table for the PUNPCKHBW instruction */
679static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
680/** Function table for the PUNPCKHWD instruction */
681static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
682/** Function table for the PUNPCKHDQ instruction */
683static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
684/** Function table for the PUNPCKHQDQ instruction */
685static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
686
687/** Function table for the PXOR instruction */
688static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
689/** Function table for the PCMPEQB instruction */
690static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
691/** Function table for the PCMPEQW instruction */
692static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
693/** Function table for the PCMPEQD instruction */
694static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
695
696
697#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
698/** What IEM just wrote. */
699uint8_t g_abIemWrote[256];
700/** How much IEM just wrote. */
701size_t g_cbIemWrote;
702#endif
703
704
705/*******************************************************************************
706* Internal Functions *
707*******************************************************************************/
708static VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
709static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
710static VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
711static VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
712/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
713static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
714static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
715static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
716static VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
717static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
718static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
719static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
720static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
721static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
722static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
723static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
724static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
725static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
726static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
727static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
728static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
729static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
730static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
731static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
732static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
733static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
734static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
735static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
736static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
737static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
738static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
739static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
740static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
741
742#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
743static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
744#endif
745static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
746static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
747
748
749
750/**
751 * Sets the pass up status.
752 *
753 * @returns VINF_SUCCESS.
754 * @param pIemCpu The per CPU IEM state of the calling thread.
755 * @param rcPassUp The pass up status. Must be informational.
756 * VINF_SUCCESS is not allowed.
757 */
758static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
759{
760 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
761
762 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
763 if (rcOldPassUp == VINF_SUCCESS)
764 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
765 /* If both are EM scheduling codes, use EM priority rules. */
766 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
767 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
768 {
769 if (rcPassUp < rcOldPassUp)
770 {
771 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
772 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
773 }
774 else
775 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
776 }
777 /* Override EM scheduling with specific status code. */
778 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
779 {
780 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
781 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
782 }
783 /* Don't override specific status code, first come first served. */
784 else
785 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
786 return VINF_SUCCESS;
787}
788
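/* Editor's note - illustrative sketch, not part of the original file: how
   callers typically use iemSetPassUpStatus to remember an informational status
   (e.g. an EM scheduling code returned by an access handler) while letting the
   instruction complete with VINF_SUCCESS. */
#if 0
    if (rcStrict != VINF_SUCCESS)                           /* informational status from a handler */
        rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);   /* remembered in rcPassUp, returns VINF_SUCCESS */
    return rcStrict;
#endif
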
789
790/**
791 * Initializes the execution state.
792 *
793 * @param pIemCpu The per CPU IEM state.
794 * @param fBypassHandlers Whether to bypass access handlers.
795 */
796DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
797{
798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
799 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
800
801#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
809 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
810#endif
811
812#ifdef VBOX_WITH_RAW_MODE_NOT_R0
813 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
814#endif
815 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
816 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
817 ? IEMMODE_64BIT
818 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
819 ? IEMMODE_32BIT
820 : IEMMODE_16BIT;
821 pIemCpu->enmCpuMode = enmMode;
822#ifdef VBOX_STRICT
823 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
824 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
825 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
826 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
827 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
828 pIemCpu->uRexReg = 127;
829 pIemCpu->uRexB = 127;
830 pIemCpu->uRexIndex = 127;
831 pIemCpu->iEffSeg = 127;
832 pIemCpu->offOpcode = 127;
833 pIemCpu->cbOpcode = 127;
834#endif
835
836 pIemCpu->cActiveMappings = 0;
837 pIemCpu->iNextMapping = 0;
838 pIemCpu->rcPassUp = VINF_SUCCESS;
839 pIemCpu->fBypassHandlers = fBypassHandlers;
840#ifdef VBOX_WITH_RAW_MODE_NOT_R0
841 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
842 && pCtx->cs.u64Base == 0
843 && pCtx->cs.u32Limit == UINT32_MAX
844 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
845 if (!pIemCpu->fInPatchCode)
846 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
847#endif
848}
849
850
851/**
852 * Initializes the decoder state.
853 *
854 * @param pIemCpu The per CPU IEM state.
855 * @param fBypassHandlers Whether to bypass access handlers.
856 */
857DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
858{
859 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
860 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
861
862#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
863 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
864 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
865 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
866 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
867 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
868 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
869 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
870 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
871#endif
872
873#ifdef VBOX_WITH_RAW_MODE_NOT_R0
874 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
875#endif
876 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
877#ifdef IEM_VERIFICATION_MODE_FULL
878 if (pIemCpu->uInjectCpl != UINT8_MAX)
879 pIemCpu->uCpl = pIemCpu->uInjectCpl;
880#endif
881 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
882 ? IEMMODE_64BIT
883 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
884 ? IEMMODE_32BIT
885 : IEMMODE_16BIT;
886 pIemCpu->enmCpuMode = enmMode;
887 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
888 pIemCpu->enmEffAddrMode = enmMode;
889 if (enmMode != IEMMODE_64BIT)
890 {
891 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
892 pIemCpu->enmEffOpSize = enmMode;
893 }
894 else
895 {
896 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
897 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
898 }
899 pIemCpu->fPrefixes = 0;
900 pIemCpu->uRexReg = 0;
901 pIemCpu->uRexB = 0;
902 pIemCpu->uRexIndex = 0;
903 pIemCpu->iEffSeg = X86_SREG_DS;
904 pIemCpu->offOpcode = 0;
905 pIemCpu->cbOpcode = 0;
906 pIemCpu->cActiveMappings = 0;
907 pIemCpu->iNextMapping = 0;
908 pIemCpu->rcPassUp = VINF_SUCCESS;
909 pIemCpu->fBypassHandlers = fBypassHandlers;
910#ifdef VBOX_WITH_RAW_MODE_NOT_R0
911 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
912 && pCtx->cs.u64Base == 0
913 && pCtx->cs.u32Limit == UINT32_MAX
914 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
915 if (!pIemCpu->fInPatchCode)
916 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
917#endif
918
919#ifdef DBGFTRACE_ENABLED
920 switch (enmMode)
921 {
922 case IEMMODE_64BIT:
923 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
924 break;
925 case IEMMODE_32BIT:
926 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
927 break;
928 case IEMMODE_16BIT:
929 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
930 break;
931 }
932#endif
933}
934
935
936/**
937 * Prefetches opcodes when starting execution for the first time.
938 *
939 * @returns Strict VBox status code.
940 * @param pIemCpu The IEM state.
941 * @param fBypassHandlers Whether to bypass access handlers.
942 */
943static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
944{
945#ifdef IEM_VERIFICATION_MODE_FULL
946 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
947#endif
948 iemInitDecoder(pIemCpu, fBypassHandlers);
949
950 /*
951 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
952 *
953 * First translate CS:rIP to a physical address.
954 */
955 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
956 uint32_t cbToTryRead;
957 RTGCPTR GCPtrPC;
958 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
959 {
960 cbToTryRead = PAGE_SIZE;
961 GCPtrPC = pCtx->rip;
962 if (!IEM_IS_CANONICAL(GCPtrPC))
963 return iemRaiseGeneralProtectionFault0(pIemCpu);
964 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
965 }
966 else
967 {
968 uint32_t GCPtrPC32 = pCtx->eip;
969 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
970 if (GCPtrPC32 > pCtx->cs.u32Limit)
971 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
972 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
973 if (!cbToTryRead) /* overflowed */
974 {
975 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
976 cbToTryRead = UINT32_MAX;
977 }
978 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
979 }
980
981#ifdef VBOX_WITH_RAW_MODE_NOT_R0
982 /* Allow interpretation of patch manager code blocks since they can for
983 instance throw #PFs for perfectly good reasons. */
984 if (pIemCpu->fInPatchCode)
985 {
986 size_t cbRead = 0;
987 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
988 AssertRCReturn(rc, rc);
989 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
990 return VINF_SUCCESS;
991 }
992#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
993
994 RTGCPHYS GCPhys;
995 uint64_t fFlags;
996 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
997 if (RT_FAILURE(rc))
998 {
999 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1000 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1001 }
1002 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1003 {
1004 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1005 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1006 }
1007 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1008 {
1009 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1010 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1011 }
1012 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1013 /** @todo Check reserved bits and such stuff. PGM is better at doing
1014 * that, so do it when implementing the guest virtual address
1015 * TLB... */
1016
1017#ifdef IEM_VERIFICATION_MODE_FULL
1018 /*
1019 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1020 * instruction.
1021 */
1022 /** @todo optimize this differently by not using PGMPhysRead. */
1023 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1024 pIemCpu->GCPhysOpcodes = GCPhys;
1025 if ( offPrevOpcodes < cbOldOpcodes
1026 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1027 {
1028 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1029 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1030 pIemCpu->cbOpcode = cbNew;
1031 return VINF_SUCCESS;
1032 }
1033#endif
1034
1035 /*
1036 * Read the bytes at this address.
1037 */
1038 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1039#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1040 size_t cbActual;
1041 if ( PATMIsEnabled(pVM)
1042 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1043 {
1044 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1045 Assert(cbActual > 0);
1046 pIemCpu->cbOpcode = (uint8_t)cbActual;
1047 }
1048 else
1049#endif
1050 {
1051 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1052 if (cbToTryRead > cbLeftOnPage)
1053 cbToTryRead = cbLeftOnPage;
1054 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1055 cbToTryRead = sizeof(pIemCpu->abOpcode);
1056
1057 if (!pIemCpu->fBypassHandlers)
1058 rc = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead);
1059 else
1060 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1061 if (rc != VINF_SUCCESS)
1062 {
1063 /** @todo status code handling */
1064 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1065 GCPtrPC, GCPhys, rc, cbToTryRead));
1066 return rc;
1067 }
1068 pIemCpu->cbOpcode = cbToTryRead;
1069 }
1070
1071 return VINF_SUCCESS;
1072}
1073
1074
1075/**
1076 * Tries to fetch at least @a cbMin additional opcode bytes, raising the
1077 * appropriate exception if it fails.
1078 *
1079 * @returns Strict VBox status code.
1080 * @param pIemCpu The IEM state.
1081 * @param cbMin The minimum number of bytes relative to offOpcode
1082 * that must be read.
1083 */
1084static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1085{
1086 /*
1087 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1088 *
1089 * First translate CS:rIP to a physical address.
1090 */
1091 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1092 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1093 uint32_t cbToTryRead;
1094 RTGCPTR GCPtrNext;
1095 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1096 {
1097 cbToTryRead = PAGE_SIZE;
1098 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1099 if (!IEM_IS_CANONICAL(GCPtrNext))
1100 return iemRaiseGeneralProtectionFault0(pIemCpu);
1101 }
1102 else
1103 {
1104 uint32_t GCPtrNext32 = pCtx->eip;
1105 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1106 GCPtrNext32 += pIemCpu->cbOpcode;
1107 if (GCPtrNext32 > pCtx->cs.u32Limit)
1108 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1109 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1110 if (!cbToTryRead) /* overflowed */
1111 {
1112 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1113 cbToTryRead = UINT32_MAX;
1114 /** @todo check out wrapping around the code segment. */
1115 }
1116 if (cbToTryRead < cbMin - cbLeft)
1117 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1118 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
1119 }
1120
1121 /* Only read up to the end of the page, and make sure we don't read more
1122 than the opcode buffer can hold. */
1123 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1124 if (cbToTryRead > cbLeftOnPage)
1125 cbToTryRead = cbLeftOnPage;
1126 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1127 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1128 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1129
1130#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1131 /* Allow interpretation of patch manager code blocks since they can for
1132 instance throw #PFs for perfectly good reasons. */
1133 if (pIemCpu->fInPatchCode)
1134 {
1135 size_t cbRead = 0;
1136 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1137 AssertRCReturn(rc, rc);
1138 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1139 return VINF_SUCCESS;
1140 }
1141#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1142
1143 RTGCPHYS GCPhys;
1144 uint64_t fFlags;
1145 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1146 if (RT_FAILURE(rc))
1147 {
1148 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1149 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1150 }
1151 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1152 {
1153 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1154 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1155 }
1156 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1157 {
1158 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1159 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1160 }
1161 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1162 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1163 /** @todo Check reserved bits and such stuff. PGM is better at doing
1164 * that, so do it when implementing the guest virtual address
1165 * TLB... */
1166
1167 /*
1168 * Read the bytes at this address.
1169 *
1170 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1171 * and since PATM should only patch the start of an instruction there
1172 * should be no need to check again here.
1173 */
1174 if (!pIemCpu->fBypassHandlers)
1175 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
1176 else
1177 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1178 if (rc != VINF_SUCCESS)
1179 {
1180 /** @todo status code handling */
1181 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1182 return rc;
1183 }
1184 pIemCpu->cbOpcode += cbToTryRead;
1185 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1186
1187 return VINF_SUCCESS;
1188}
1189
1190
1191/**
1192 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1193 *
1194 * @returns Strict VBox status code.
1195 * @param pIemCpu The IEM state.
1196 * @param pb Where to return the opcode byte.
1197 */
1198DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1199{
1200 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1201 if (rcStrict == VINF_SUCCESS)
1202 {
1203 uint8_t offOpcode = pIemCpu->offOpcode;
1204 *pb = pIemCpu->abOpcode[offOpcode];
1205 pIemCpu->offOpcode = offOpcode + 1;
1206 }
1207 else
1208 *pb = 0;
1209 return rcStrict;
1210}
1211
1212
1213/**
1214 * Fetches the next opcode byte.
1215 *
1216 * @returns Strict VBox status code.
1217 * @param pIemCpu The IEM state.
1218 * @param pu8 Where to return the opcode byte.
1219 */
1220DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1221{
1222 uint8_t const offOpcode = pIemCpu->offOpcode;
1223 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1224 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1225
1226 *pu8 = pIemCpu->abOpcode[offOpcode];
1227 pIemCpu->offOpcode = offOpcode + 1;
1228 return VINF_SUCCESS;
1229}
1230
1231
1232/**
1233 * Fetches the next opcode byte, returns automatically on failure.
1234 *
1235 * @param a_pu8 Where to return the opcode byte.
1236 * @remark Implicitly references pIemCpu.
1237 */
1238#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1239 do \
1240 { \
1241 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1242 if (rcStrict2 != VINF_SUCCESS) \
1243 return rcStrict2; \
1244 } while (0)
1245
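/* Editor's note - illustrative sketch, not part of the original file: typical
   use of the fetch macro above inside a decoder - reading the ModR/M byte and
   returning automatically if more opcode bytes cannot be fetched.  The opcode
   name is made up. */
#if 0
FNIEMOP_DEF(iemOp_example)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);          /* returns from iemOp_example on failure */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    { /* register operand */ }
    else
    { /* memory operand - decode the effective address */ }
    return VINF_SUCCESS;
}
#endif
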
1246
1247/**
1248 * Fetches the next signed byte from the opcode stream.
1249 *
1250 * @returns Strict VBox status code.
1251 * @param pIemCpu The IEM state.
1252 * @param pi8 Where to return the signed byte.
1253 */
1254DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1255{
1256 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1257}
1258
1259
1260/**
1261 * Fetches the next signed byte from the opcode stream, returning automatically
1262 * on failure.
1263 *
1264 * @param pi8 Where to return the signed byte.
1265 * @remark Implicitly references pIemCpu.
1266 */
1267#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1268 do \
1269 { \
1270 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1271 if (rcStrict2 != VINF_SUCCESS) \
1272 return rcStrict2; \
1273 } while (0)
1274
1275
1276/**
1277 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1278 *
1279 * @returns Strict VBox status code.
1280 * @param pIemCpu The IEM state.
1281 * @param pu16 Where to return the opcode word.
1282 */
1283DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1284{
1285 uint8_t u8;
1286 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1287 if (rcStrict == VINF_SUCCESS)
1288 *pu16 = (int8_t)u8;
1289 return rcStrict;
1290}
1291
1292
1293/**
1294 * Fetches the next signed byte from the opcode stream, extending it to
1295 * unsigned 16-bit.
1296 *
1297 * @returns Strict VBox status code.
1298 * @param pIemCpu The IEM state.
1299 * @param pu16 Where to return the unsigned word.
1300 */
1301DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1302{
1303 uint8_t const offOpcode = pIemCpu->offOpcode;
1304 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1305 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1306
1307 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1308 pIemCpu->offOpcode = offOpcode + 1;
1309 return VINF_SUCCESS;
1310}
1311
1312
1313/**
1314 * Fetches the next signed byte from the opcode stream and sign-extends it to
1315 * a word, returning automatically on failure.
1316 *
1317 * @param pu16 Where to return the word.
1318 * @remark Implicitly references pIemCpu.
1319 */
1320#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1321 do \
1322 { \
1323 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1324 if (rcStrict2 != VINF_SUCCESS) \
1325 return rcStrict2; \
1326 } while (0)
1327
1328
1329/**
1330 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1331 *
1332 * @returns Strict VBox status code.
1333 * @param pIemCpu The IEM state.
1334 * @param pu32 Where to return the opcode dword.
1335 */
1336DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1337{
1338 uint8_t u8;
1339 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1340 if (rcStrict == VINF_SUCCESS)
1341 *pu32 = (int8_t)u8;
1342 return rcStrict;
1343}
1344
1345
1346/**
1347 * Fetches the next signed byte from the opcode stream, extending it to
1348 * unsigned 32-bit.
1349 *
1350 * @returns Strict VBox status code.
1351 * @param pIemCpu The IEM state.
1352 * @param pu32 Where to return the unsigned dword.
1353 */
1354DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1355{
1356 uint8_t const offOpcode = pIemCpu->offOpcode;
1357 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1358 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1359
1360 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1361 pIemCpu->offOpcode = offOpcode + 1;
1362 return VINF_SUCCESS;
1363}
1364
1365
1366/**
1367 * Fetches the next signed byte from the opcode stream and sign-extends it to
1368 * a double word, returning automatically on failure.
1369 *
1370 * @param pu32 Where to return the double word.
1371 * @remark Implicitly references pIemCpu.
1372 */
1373#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1374 do \
1375 { \
1376 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1377 if (rcStrict2 != VINF_SUCCESS) \
1378 return rcStrict2; \
1379 } while (0)
1380
1381
1382/**
1383 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1384 *
1385 * @returns Strict VBox status code.
1386 * @param pIemCpu The IEM state.
1387 * @param pu64 Where to return the opcode qword.
1388 */
1389DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1390{
1391 uint8_t u8;
1392 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1393 if (rcStrict == VINF_SUCCESS)
1394 *pu64 = (int8_t)u8;
1395 return rcStrict;
1396}
1397
1398
1399/**
1400 * Fetches the next signed byte from the opcode stream, extending it to
1401 * unsigned 64-bit.
1402 *
1403 * @returns Strict VBox status code.
1404 * @param pIemCpu The IEM state.
1405 * @param pu64 Where to return the unsigned qword.
1406 */
1407DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1408{
1409 uint8_t const offOpcode = pIemCpu->offOpcode;
1410 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1411 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1412
1413 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1414 pIemCpu->offOpcode = offOpcode + 1;
1415 return VINF_SUCCESS;
1416}
1417
1418
1419/**
1420 * Fetches the next signed byte from the opcode stream and sign-extends it to
1421 * a quad word, returning automatically on failure.
1422 *
1423 * @param pu64 Where to return the quad word.
1424 * @remark Implicitly references pIemCpu.
1425 */
1426#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1427 do \
1428 { \
1429 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1430 if (rcStrict2 != VINF_SUCCESS) \
1431 return rcStrict2; \
1432 } while (0)
1433
1434
1435/**
1436 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1437 *
1438 * @returns Strict VBox status code.
1439 * @param pIemCpu The IEM state.
1440 * @param pu16 Where to return the opcode word.
1441 */
1442DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1443{
1444 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1445 if (rcStrict == VINF_SUCCESS)
1446 {
1447 uint8_t offOpcode = pIemCpu->offOpcode;
1448 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1449 pIemCpu->offOpcode = offOpcode + 2;
1450 }
1451 else
1452 *pu16 = 0;
1453 return rcStrict;
1454}
1455
1456
1457/**
1458 * Fetches the next opcode word.
1459 *
1460 * @returns Strict VBox status code.
1461 * @param pIemCpu The IEM state.
1462 * @param pu16 Where to return the opcode word.
1463 */
1464DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1465{
1466 uint8_t const offOpcode = pIemCpu->offOpcode;
1467 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1468 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1469
1470 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1471 pIemCpu->offOpcode = offOpcode + 2;
1472 return VINF_SUCCESS;
1473}
1474
1475
1476/**
1477 * Fetches the next opcode word, returns automatically on failure.
1478 *
1479 * @param a_pu16 Where to return the opcode word.
1480 * @remark Implicitly references pIemCpu.
1481 */
1482#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1483 do \
1484 { \
1485 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1486 if (rcStrict2 != VINF_SUCCESS) \
1487 return rcStrict2; \
1488 } while (0)
1489
1490
1491/**
1492 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1493 *
1494 * @returns Strict VBox status code.
1495 * @param pIemCpu The IEM state.
1496 * @param pu32 Where to return the opcode double word.
1497 */
1498DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1499{
1500 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1501 if (rcStrict == VINF_SUCCESS)
1502 {
1503 uint8_t offOpcode = pIemCpu->offOpcode;
1504 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1505 pIemCpu->offOpcode = offOpcode + 2;
1506 }
1507 else
1508 *pu32 = 0;
1509 return rcStrict;
1510}
1511
1512
1513/**
1514 * Fetches the next opcode word, zero extending it to a double word.
1515 *
1516 * @returns Strict VBox status code.
1517 * @param pIemCpu The IEM state.
1518 * @param pu32 Where to return the opcode double word.
1519 */
1520DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1521{
1522 uint8_t const offOpcode = pIemCpu->offOpcode;
1523 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1524 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1525
1526 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1527 pIemCpu->offOpcode = offOpcode + 2;
1528 return VINF_SUCCESS;
1529}
1530
1531
1532/**
1533 * Fetches the next opcode word and zero extends it to a double word, returns
1534 * automatically on failure.
1535 *
1536 * @param a_pu32 Where to return the opcode double word.
1537 * @remark Implicitly references pIemCpu.
1538 */
1539#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1540 do \
1541 { \
1542 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1543 if (rcStrict2 != VINF_SUCCESS) \
1544 return rcStrict2; \
1545 } while (0)
1546
1547
1548/**
1549 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1550 *
1551 * @returns Strict VBox status code.
1552 * @param pIemCpu The IEM state.
1553 * @param pu64 Where to return the opcode quad word.
1554 */
1555DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1556{
1557 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1558 if (rcStrict == VINF_SUCCESS)
1559 {
1560 uint8_t offOpcode = pIemCpu->offOpcode;
1561 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1562 pIemCpu->offOpcode = offOpcode + 2;
1563 }
1564 else
1565 *pu64 = 0;
1566 return rcStrict;
1567}
1568
1569
1570/**
1571 * Fetches the next opcode word, zero extending it to a quad word.
1572 *
1573 * @returns Strict VBox status code.
1574 * @param pIemCpu The IEM state.
1575 * @param pu64 Where to return the opcode quad word.
1576 */
1577DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1578{
1579 uint8_t const offOpcode = pIemCpu->offOpcode;
1580 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1581 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1582
1583 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1584 pIemCpu->offOpcode = offOpcode + 2;
1585 return VINF_SUCCESS;
1586}
1587
1588
1589/**
1590 * Fetches the next opcode word and zero extends it to a quad word, returns
1591 * automatically on failure.
1592 *
1593 * @param a_pu64 Where to return the opcode quad word.
1594 * @remark Implicitly references pIemCpu.
1595 */
1596#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1597 do \
1598 { \
1599 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1600 if (rcStrict2 != VINF_SUCCESS) \
1601 return rcStrict2; \
1602 } while (0)
1603
1604
1605/**
1606 * Fetches the next signed word from the opcode stream.
1607 *
1608 * @returns Strict VBox status code.
1609 * @param pIemCpu The IEM state.
1610 * @param pi16 Where to return the signed word.
1611 */
1612DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1613{
1614 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1615}
1616
1617
1618/**
1619 * Fetches the next signed word from the opcode stream, returning automatically
1620 * on failure.
1621 *
1622 * @param a_pi16 Where to return the signed word.
1623 * @remark Implicitly references pIemCpu.
1624 */
1625#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1626 do \
1627 { \
1628 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1629 if (rcStrict2 != VINF_SUCCESS) \
1630 return rcStrict2; \
1631 } while (0)
1632
1633
1634/**
1635 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1636 *
1637 * @returns Strict VBox status code.
1638 * @param pIemCpu The IEM state.
1639 * @param pu32 Where to return the opcode dword.
1640 */
1641DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1642{
1643 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1644 if (rcStrict == VINF_SUCCESS)
1645 {
1646 uint8_t offOpcode = pIemCpu->offOpcode;
1647 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1648 pIemCpu->abOpcode[offOpcode + 1],
1649 pIemCpu->abOpcode[offOpcode + 2],
1650 pIemCpu->abOpcode[offOpcode + 3]);
1651 pIemCpu->offOpcode = offOpcode + 4;
1652 }
1653 else
1654 *pu32 = 0;
1655 return rcStrict;
1656}
1657
1658
1659/**
1660 * Fetches the next opcode dword.
1661 *
1662 * @returns Strict VBox status code.
1663 * @param pIemCpu The IEM state.
1664 * @param pu32 Where to return the opcode double word.
1665 */
1666DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1667{
1668 uint8_t const offOpcode = pIemCpu->offOpcode;
1669 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1670 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1671
1672 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1673 pIemCpu->abOpcode[offOpcode + 1],
1674 pIemCpu->abOpcode[offOpcode + 2],
1675 pIemCpu->abOpcode[offOpcode + 3]);
1676 pIemCpu->offOpcode = offOpcode + 4;
1677 return VINF_SUCCESS;
1678}
1679
1680
1681/**
1682 * Fetches the next opcode dword, returns automatically on failure.
1683 *
1684 * @param a_pu32 Where to return the opcode dword.
1685 * @remark Implicitly references pIemCpu.
1686 */
1687#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1688 do \
1689 { \
1690 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1691 if (rcStrict2 != VINF_SUCCESS) \
1692 return rcStrict2; \
1693 } while (0)
1694
1695
1696/**
1697 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1698 *
1699 * @returns Strict VBox status code.
1700 * @param pIemCpu The IEM state.
1701 * @param pu64 Where to return the opcode quad word.
1702 */
1703DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1704{
1705 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1706 if (rcStrict == VINF_SUCCESS)
1707 {
1708 uint8_t offOpcode = pIemCpu->offOpcode;
1709 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1710 pIemCpu->abOpcode[offOpcode + 1],
1711 pIemCpu->abOpcode[offOpcode + 2],
1712 pIemCpu->abOpcode[offOpcode + 3]);
1713 pIemCpu->offOpcode = offOpcode + 4;
1714 }
1715 else
1716 *pu64 = 0;
1717 return rcStrict;
1718}
1719
1720
1721/**
1722 * Fetches the next opcode dword, zero extending it to a quad word.
1723 *
1724 * @returns Strict VBox status code.
1725 * @param pIemCpu The IEM state.
1726 * @param pu64 Where to return the opcode quad word.
1727 */
1728DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1729{
1730 uint8_t const offOpcode = pIemCpu->offOpcode;
1731 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1732 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1733
1734 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1735 pIemCpu->abOpcode[offOpcode + 1],
1736 pIemCpu->abOpcode[offOpcode + 2],
1737 pIemCpu->abOpcode[offOpcode + 3]);
1738 pIemCpu->offOpcode = offOpcode + 4;
1739 return VINF_SUCCESS;
1740}
1741
1742
1743/**
1744 * Fetches the next opcode dword and zero extends it to a quad word, returns
1745 * automatically on failure.
1746 *
1747 * @param a_pu64 Where to return the opcode quad word.
1748 * @remark Implicitly references pIemCpu.
1749 */
1750#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1751 do \
1752 { \
1753 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1754 if (rcStrict2 != VINF_SUCCESS) \
1755 return rcStrict2; \
1756 } while (0)
1757
1758
1759/**
1760 * Fetches the next signed double word from the opcode stream.
1761 *
1762 * @returns Strict VBox status code.
1763 * @param pIemCpu The IEM state.
1764 * @param pi32 Where to return the signed double word.
1765 */
1766DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1767{
1768 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1769}
1770
1771/**
1772 * Fetches the next signed double word from the opcode stream, returning
1773 * automatically on failure.
1774 *
1775 * @param a_pi32 Where to return the signed double word.
1776 * @remark Implicitly references pIemCpu.
1777 */
1778#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1779 do \
1780 { \
1781 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1782 if (rcStrict2 != VINF_SUCCESS) \
1783 return rcStrict2; \
1784 } while (0)
1785
1786
1787/**
1788 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1789 *
1790 * @returns Strict VBox status code.
1791 * @param pIemCpu The IEM state.
1792 * @param pu64 Where to return the opcode qword.
1793 */
1794DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1795{
1796 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1797 if (rcStrict == VINF_SUCCESS)
1798 {
1799 uint8_t offOpcode = pIemCpu->offOpcode;
1800 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1801 pIemCpu->abOpcode[offOpcode + 1],
1802 pIemCpu->abOpcode[offOpcode + 2],
1803 pIemCpu->abOpcode[offOpcode + 3]);
1804 pIemCpu->offOpcode = offOpcode + 4;
1805 }
1806 else
1807 *pu64 = 0;
1808 return rcStrict;
1809}
1810
1811
1812/**
1813 * Fetches the next opcode dword, sign extending it into a quad word.
1814 *
1815 * @returns Strict VBox status code.
1816 * @param pIemCpu The IEM state.
1817 * @param pu64 Where to return the opcode quad word.
1818 */
1819DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1820{
1821 uint8_t const offOpcode = pIemCpu->offOpcode;
1822 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1823 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1824
1825 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1826 pIemCpu->abOpcode[offOpcode + 1],
1827 pIemCpu->abOpcode[offOpcode + 2],
1828 pIemCpu->abOpcode[offOpcode + 3]);
1829 *pu64 = i32;
1830 pIemCpu->offOpcode = offOpcode + 4;
1831 return VINF_SUCCESS;
1832}
1833
1834
1835/**
1836 * Fetches the next opcode double word and sign extends it to a quad word,
1837 * returns automatically on failure.
1838 *
1839 * @param a_pu64 Where to return the opcode quad word.
1840 * @remark Implicitly references pIemCpu.
1841 */
1842#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1843 do \
1844 { \
1845 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1846 if (rcStrict2 != VINF_SUCCESS) \
1847 return rcStrict2; \
1848 } while (0)
1849
1850
1851/**
1852 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1853 *
1854 * @returns Strict VBox status code.
1855 * @param pIemCpu The IEM state.
1856 * @param pu64 Where to return the opcode qword.
1857 */
1858DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1859{
1860 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1861 if (rcStrict == VINF_SUCCESS)
1862 {
1863 uint8_t offOpcode = pIemCpu->offOpcode;
1864 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1865 pIemCpu->abOpcode[offOpcode + 1],
1866 pIemCpu->abOpcode[offOpcode + 2],
1867 pIemCpu->abOpcode[offOpcode + 3],
1868 pIemCpu->abOpcode[offOpcode + 4],
1869 pIemCpu->abOpcode[offOpcode + 5],
1870 pIemCpu->abOpcode[offOpcode + 6],
1871 pIemCpu->abOpcode[offOpcode + 7]);
1872 pIemCpu->offOpcode = offOpcode + 8;
1873 }
1874 else
1875 *pu64 = 0;
1876 return rcStrict;
1877}
1878
1879
1880/**
1881 * Fetches the next opcode qword.
1882 *
1883 * @returns Strict VBox status code.
1884 * @param pIemCpu The IEM state.
1885 * @param pu64 Where to return the opcode qword.
1886 */
1887DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1888{
1889 uint8_t const offOpcode = pIemCpu->offOpcode;
1890 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1891 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1892
1893 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1894 pIemCpu->abOpcode[offOpcode + 1],
1895 pIemCpu->abOpcode[offOpcode + 2],
1896 pIemCpu->abOpcode[offOpcode + 3],
1897 pIemCpu->abOpcode[offOpcode + 4],
1898 pIemCpu->abOpcode[offOpcode + 5],
1899 pIemCpu->abOpcode[offOpcode + 6],
1900 pIemCpu->abOpcode[offOpcode + 7]);
1901 pIemCpu->offOpcode = offOpcode + 8;
1902 return VINF_SUCCESS;
1903}
1904
1905
1906/**
1907 * Fetches the next opcode quad word, returns automatically on failure.
1908 *
1909 * @param a_pu64 Where to return the opcode quad word.
1910 * @remark Implicitly references pIemCpu.
1911 */
1912#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1913 do \
1914 { \
1915 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1916 if (rcStrict2 != VINF_SUCCESS) \
1917 return rcStrict2; \
1918 } while (0)
1919
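/*
 * Illustrative sketch of how the IEM_OPCODE_GET_NEXT_XXX macros above are consumed by a
 * decoder helper.  The macros expand to an early 'return rcStrict2' on fetch failure, so
 * they can only be used in functions returning VBOXSTRICTRC with pIemCpu in scope.  The
 * function name iemOp_ExampleSketch and its operands are hypothetical.
 */
#if 0 /* illustrative sketch only, not compiled */
static VBOXSTRICTRC iemOp_ExampleSketch(PIEMCPU pIemCpu)
{
    uint16_t u16Imm;
    IEM_OPCODE_GET_NEXT_U16(&u16Imm);           /* returns from this function on fetch failure */

    uint64_t u64Disp;
    IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Disp);   /* dword displacement, sign extended to 64 bits */

    /* ... decode and dispatch using u16Imm / u64Disp ... */
    return VINF_SUCCESS;
}
#endif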
1920
1921/** @name Misc Worker Functions.
1922 * @{
1923 */
1924
1925
1926/**
1927 * Validates a new SS segment.
1928 *
1929 * @returns VBox strict status code.
1930 * @param pIemCpu The IEM per CPU instance data.
1931 * @param pCtx The CPU context.
1932 * @param NewSS The new SS selector.
1933 * @param uCpl The CPL to load the stack for.
1934 * @param pDesc Where to return the descriptor.
1935 */
1936static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1937{
1938 NOREF(pCtx);
1939
1940 /* Null selectors are not allowed (we're not called for dispatching
1941 interrupts with SS=0 in long mode). */
1942 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1943 {
1944 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1945 return iemRaiseTaskSwitchFault0(pIemCpu);
1946 }
1947
1948 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1949 if ((NewSS & X86_SEL_RPL) != uCpl)
1950 {
1951 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1952 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1953 }
1954
1955 /*
1956 * Read the descriptor.
1957 */
1958 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1959 if (rcStrict != VINF_SUCCESS)
1960 return rcStrict;
1961
1962 /*
1963 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1964 */
1965 if (!pDesc->Legacy.Gen.u1DescType)
1966 {
1967 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1968 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1969 }
1970
1971 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1972 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1973 {
1974 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1975 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1976 }
1977 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1978 {
1979 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1980 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1981 }
1982
1983 /* Is it there? */
1984 /** @todo testcase: Is this checked before the canonical / limit check below? */
1985 if (!pDesc->Legacy.Gen.u1Present)
1986 {
1987 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1988 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1989 }
1990
1991 return VINF_SUCCESS;
1992}
1993
1994
1995/**
1996 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1997 * not.
1998 *
1999 * @param a_pIemCpu The IEM per CPU data.
2000 * @param a_pCtx The CPU context.
2001 */
2002#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2003# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2004 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2005 ? (a_pCtx)->eflags.u \
2006 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2007#else
2008# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2009 ( (a_pCtx)->eflags.u )
2010#endif
2011
2012/**
2013 * Updates the EFLAGS in the correct manner wrt. PATM.
2014 *
2015 * @param a_pIemCpu The IEM per CPU data.
2016 * @param a_pCtx The CPU context.
2017 */
2018#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2019# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2020 do { \
2021 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2022 (a_pCtx)->eflags.u = (a_fEfl); \
2023 else \
2024 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2025 } while (0)
2026#else
2027# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2028 do { \
2029 (a_pCtx)->eflags.u = (a_fEfl); \
2030 } while (0)
2031#endif
2032
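/*
 * Illustrative sketch: IEMMISC_GET_EFL and IEMMISC_SET_EFL are intended as a
 * read-modify-write pair so that PATM-owned EFLAGS bits are honoured in raw mode.
 * A minimal sketch, assuming pIemCpu and pCtx are in scope as elsewhere in this file
 * (the real-mode interrupt code further down uses exactly this pattern):
 */
#if 0 /* illustrative sketch only, not compiled */
    uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);     /* fetch, PATM-aware in raw mode */
    fEfl &= ~X86_EFL_IF;                                /* e.g. mask interrupts */
    IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);               /* write back the same way */
#endif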
2033
2034/** @} */
2035
2036/** @name Raising Exceptions.
2037 *
2038 * @{
2039 */
2040
2041/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2042 * @{ */
2043/** CPU exception. */
2044#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2045/** External interrupt (from PIC, APIC, whatever). */
2046#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2047/** Software interrupt (int or into, not bound).
2048 * Returns to the following instruction. */
2049#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2050/** Takes an error code. */
2051#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2052/** Takes a CR2. */
2053#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2054/** Generated by the breakpoint instruction. */
2055#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2056/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2057#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2058/** @} */
2059
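/*
 * Illustrative sketch: the IEM_XCPT_FLAGS_XXX values above are OR'ed together to describe
 * the event handed to iemRaiseXcptOrInt.  For example, a page fault would roughly be
 * described like this (the exact call site is a sketch, not a quote from this file):
 */
#if 0 /* illustrative sketch only, not compiled */
    uint32_t const fXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT   /* a CPU exception...           */
                              | IEM_XCPT_FLAGS_ERR          /* ...that pushes an error code */
                              | IEM_XCPT_FLAGS_CR2;         /* ...and updates CR2           */
#endif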
2060
2061/**
2062 * Loads the specified stack far pointer from the TSS.
2063 *
2064 * @returns VBox strict status code.
2065 * @param pIemCpu The IEM per CPU instance data.
2066 * @param pCtx The CPU context.
2067 * @param uCpl The CPL to load the stack for.
2068 * @param pSelSS Where to return the new stack segment.
2069 * @param puEsp Where to return the new stack pointer.
2070 */
2071static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2072 PRTSEL pSelSS, uint32_t *puEsp)
2073{
2074 VBOXSTRICTRC rcStrict;
2075 Assert(uCpl < 4);
2076 *puEsp = 0; /* make gcc happy */
2077 *pSelSS = 0; /* make gcc happy */
2078
2079 switch (pCtx->tr.Attr.n.u4Type)
2080 {
2081 /*
2082 * 16-bit TSS (X86TSS16).
2083 */
2084 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2085 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2086 {
2087 uint32_t off = uCpl * 4 + 2;
2088 if (off + 4 > pCtx->tr.u32Limit)
2089 {
2090 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2091 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2092 }
2093
2094 uint32_t u32Tmp = 0; /* gcc maybe... */
2095 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2096 if (rcStrict == VINF_SUCCESS)
2097 {
2098 *puEsp = RT_LOWORD(u32Tmp);
2099 *pSelSS = RT_HIWORD(u32Tmp);
2100 return VINF_SUCCESS;
2101 }
2102 break;
2103 }
2104
2105 /*
2106 * 32-bit TSS (X86TSS32).
2107 */
2108 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2109 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2110 {
2111 uint32_t off = uCpl * 8 + 4;
2112 if (off + 7 > pCtx->tr.u32Limit)
2113 {
2114 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2115 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2116 }
2117
2118 uint64_t u64Tmp;
2119 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2120 if (rcStrict == VINF_SUCCESS)
2121 {
2122 *puEsp = u64Tmp & UINT32_MAX;
2123 *pSelSS = (RTSEL)(u64Tmp >> 32);
2124 return VINF_SUCCESS;
2125 }
2126 break;
2127 }
2128
2129 default:
2130 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2131 }
2132 return rcStrict;
2133}
2134
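/*
 * Layout note on the offsets used above (derived from the code and the Intel SDM TSS
 * layouts): a 16-bit TSS stores its privilege-level stacks as SP/SS word pairs starting
 * at offset 2 (4 bytes per CPL), while a 32-bit TSS stores ESP followed by the SS selector
 * starting at offset 4 (8 bytes per CPL).  Hence uCpl * 4 + 2 and uCpl * 8 + 4, with the
 * selector taken from the upper half of the value read in both cases.
 */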
2135
2136/**
2137 * Loads the specified stack pointer from the 64-bit TSS.
2138 *
2139 * @returns VBox strict status code.
2140 * @param pIemCpu The IEM per CPU instance data.
2141 * @param pCtx The CPU context.
2142 * @param uCpl The CPL to load the stack for.
2143 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2144 * @param puRsp Where to return the new stack pointer.
2145 */
2146static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
2147 uint64_t *puRsp)
2148{
2149 Assert(uCpl < 4);
2150 Assert(uIst < 8);
2151 *puRsp = 0; /* make gcc happy */
2152
2153 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2154
2155 uint32_t off;
2156 if (uIst)
2157 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2158 else
2159 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2160 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2161 {
2162 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2163 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2164 }
2165
2166 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2167}
2168
2169
2170/**
2171 * Adjusts the CPU state according to the exception being raised.
2172 *
2173 * @param pCtx The CPU context.
2174 * @param u8Vector The exception that has been raised.
2175 */
2176DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2177{
2178 switch (u8Vector)
2179 {
2180 case X86_XCPT_DB:
2181 pCtx->dr[7] &= ~X86_DR7_GD;
2182 break;
2183 /** @todo Read the AMD and Intel exception reference... */
2184 }
2185}
2186
2187
2188/**
2189 * Implements exceptions and interrupts for real mode.
2190 *
2191 * @returns VBox strict status code.
2192 * @param pIemCpu The IEM per CPU instance data.
2193 * @param pCtx The CPU context.
2194 * @param cbInstr The number of bytes to offset rIP by in the return
2195 * address.
2196 * @param u8Vector The interrupt / exception vector number.
2197 * @param fFlags The flags.
2198 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2199 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2200 */
2201static VBOXSTRICTRC
2202iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2203 PCPUMCTX pCtx,
2204 uint8_t cbInstr,
2205 uint8_t u8Vector,
2206 uint32_t fFlags,
2207 uint16_t uErr,
2208 uint64_t uCr2)
2209{
2210 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2211 NOREF(uErr); NOREF(uCr2);
2212
2213 /*
2214 * Read the IDT entry.
2215 */
2216 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2217 {
2218 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2219 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2220 }
2221 RTFAR16 Idte;
2222 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2223 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2224 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2225 return rcStrict;
2226
2227 /*
2228 * Push the stack frame.
2229 */
2230 uint16_t *pu16Frame;
2231 uint64_t uNewRsp;
2232 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2233 if (rcStrict != VINF_SUCCESS)
2234 return rcStrict;
2235
2236 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2237 pu16Frame[2] = (uint16_t)fEfl;
2238 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2239 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2240 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2241 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2242 return rcStrict;
2243
2244 /*
2245 * Load the vector address into cs:ip and make exception specific state
2246 * adjustments.
2247 */
2248 pCtx->cs.Sel = Idte.sel;
2249 pCtx->cs.ValidSel = Idte.sel;
2250 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2251 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2252 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2253 pCtx->rip = Idte.off;
2254 fEfl &= ~X86_EFL_IF;
2255 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2256
2257 /** @todo do we actually do this in real mode? */
2258 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2259 iemRaiseXcptAdjustState(pCtx, u8Vector);
2260
2261 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2262}
2263
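/*
 * Illustrative sketch: in real mode the IDT entry fetched above is simply four bytes at
 * IDTR.base + vector * 4, an offset word followed by a segment word (hence the RTFAR16),
 * and the 6-byte frame pushed above ends up with IP at the lowest address, then CS, then
 * FLAGS.  A minimal address calculation sketch using a hypothetical vector:
 */
#if 0 /* illustrative sketch only, not compiled */
    uint8_t const u8ExampleVector = 0x21;                                          /* hypothetical */
    RTGCPTR const GCPtrIvtEntry   = pCtx->idtr.pIdt + UINT32_C(4) * u8ExampleVector;
#endif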
2264
2265/**
2266 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2267 *
2268 * @param pIemCpu The IEM per CPU instance data.
2269 * @param pSReg Pointer to the segment register.
2270 */
2271static void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2272{
2273 pSReg->Sel = 0;
2274 pSReg->ValidSel = 0;
2275 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2276 {
2277 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2278 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2279 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2280 }
2281 else
2282 {
2283 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2284 /** @todo check this on AMD-V */
2285 pSReg->u64Base = 0;
2286 pSReg->u32Limit = 0;
2287 }
2288}
2289
2290
2291/**
2292 * Loads a segment selector during a task switch in V8086 mode.
2293 *
2294 * @param pIemCpu The IEM per CPU instance data.
2295 * @param pSReg Pointer to the segment register.
2296 * @param uSel The selector value to load.
2297 */
2298static void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2299{
2300 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2301 pSReg->Sel = uSel;
2302 pSReg->ValidSel = uSel;
2303 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2304 pSReg->u64Base = uSel << 4;
2305 pSReg->u32Limit = 0xffff;
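    /* 0xf3 = present, DPL=3, accessed read/write data segment. */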
2306 pSReg->Attr.u = 0xf3;
2307}
2308
2309
2310/**
2311 * Loads a NULL data selector into a selector register, both the hidden and
2312 * visible parts, in protected mode.
2313 *
2314 * @param pIemCpu The IEM state of the calling EMT.
2315 * @param pSReg Pointer to the segment register.
2316 * @param uRpl The RPL.
2317 */
2318static void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2319{
2320 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2321 * data selector in protected mode. */
2322 pSReg->Sel = uRpl;
2323 pSReg->ValidSel = uRpl;
2324 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2325 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2326 {
2327 /* VT-x (Intel 3960x) observed doing something like this. */
2328 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2329 pSReg->u32Limit = UINT32_MAX;
2330 pSReg->u64Base = 0;
2331 }
2332 else
2333 {
2334 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2335 pSReg->u32Limit = 0;
2336 pSReg->u64Base = 0;
2337 }
2338}
2339
2340
2341/**
2342 * Loads a segment selector during a task switch in protected mode. In this task
2343 * switch scenario, we raise #TS exceptions rather than #GPs.
2344 *
2345 * @returns VBox strict status code.
2346 * @param pIemCpu The IEM per CPU instance data.
2347 * @param pSReg Pointer to the segment register.
2348 * @param uSel The new selector value.
2349 *
2350 * @remarks This does -NOT- handle CS or SS.
2351 * @remarks This expects pIemCpu->uCpl to be up to date.
2352 */
2353static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2354{
2355 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2356
2357 /* Null data selector. */
2358 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2359 {
2360 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2362 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2363 return VINF_SUCCESS;
2364 }
2365
2366 /* Fetch the descriptor. */
2367 IEMSELDESC Desc;
2368 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2369 if (rcStrict != VINF_SUCCESS)
2370 {
2371 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2372 VBOXSTRICTRC_VAL(rcStrict)));
2373 return rcStrict;
2374 }
2375
2376 /* Must be a data segment or readable code segment. */
2377 if ( !Desc.Legacy.Gen.u1DescType
2378 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2379 {
2380 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2381 Desc.Legacy.Gen.u4Type));
2382 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2383 }
2384
2385 /* Check privileges for data segments and non-conforming code segments. */
2386 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2387 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2388 {
2389 /* The RPL and the new CPL must be less than or equal to the DPL. */
2390 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2391 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2392 {
2393 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2394 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2395 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2396 }
2397 }
2398
2399 /* Is it there? */
2400 if (!Desc.Legacy.Gen.u1Present)
2401 {
2402 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2403 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2404 }
2405
2406 /* The base and limit. */
2407 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2408 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2409
2410 /*
2411 * Ok, everything checked out fine. Now set the accessed bit before
2412 * committing the result into the registers.
2413 */
2414 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2415 {
2416 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2417 if (rcStrict != VINF_SUCCESS)
2418 return rcStrict;
2419 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2420 }
2421
2422 /* Commit */
2423 pSReg->Sel = uSel;
2424 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2425 pSReg->u32Limit = cbLimit;
2426 pSReg->u64Base = u64Base;
2427 pSReg->ValidSel = uSel;
2428 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2429 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2430 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2431
2432 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2433 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2434 return VINF_SUCCESS;
2435}
2436
2437
2438/**
2439 * Performs a task switch.
2440 *
2441 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2442 * caller is responsible for performing the necessary checks (like DPL, TSS
2443 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2444 * reference for JMP, CALL, IRET.
2445 *
2446 * If the task switch is due to a software interrupt or hardware exception,
2447 * the caller is responsible for validating the TSS selector and descriptor. See
2448 * Intel Instruction reference for INT n.
2449 *
2450 * @returns VBox strict status code.
2451 * @param pIemCpu The IEM per CPU instance data.
2452 * @param pCtx The CPU context.
2453 * @param enmTaskSwitch What caused this task switch.
2454 * @param uNextEip The EIP effective after the task switch.
2455 * @param fFlags The flags.
2456 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2457 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2458 * @param SelTSS The TSS selector of the new task.
2459 * @param pNewDescTSS Pointer to the new TSS descriptor.
2460 */
2461static VBOXSTRICTRC iemTaskSwitch(PIEMCPU pIemCpu,
2462 PCPUMCTX pCtx,
2463 IEMTASKSWITCH enmTaskSwitch,
2464 uint32_t uNextEip,
2465 uint32_t fFlags,
2466 uint16_t uErr,
2467 uint64_t uCr2,
2468 RTSEL SelTSS,
2469 PIEMSELDESC pNewDescTSS)
2470{
2471 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2472 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2473
2474 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2475 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2476 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2477 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2478 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2479
2480 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2481 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2482
2483 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2484 fIsNewTSS386, pCtx->eip, uNextEip));
2485
2486 /* Update CR2 in case it's a page-fault. */
2487 /** @todo This should probably be done much earlier in IEM/PGM. See
2488 * @bugref{5653} comment#49. */
2489 if (fFlags & IEM_XCPT_FLAGS_CR2)
2490 pCtx->cr2 = uCr2;
2491
2492 /*
2493 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2494 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2495 */
2496 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2497 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2498 if (uNewTSSLimit < uNewTSSLimitMin)
2499 {
2500 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2501 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2502 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2503 }
2504
2505 /*
2506 * Check the current TSS limit. The last written byte to the current TSS during the
2507 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2508 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2509 *
2510 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2511 * end up with smaller than "legal" TSS limits.
2512 */
2513 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2514 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2515 if (uCurTSSLimit < uCurTSSLimitMin)
2516 {
2517 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2518 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2519 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2520 }
2521
2522 /*
2523 * Verify that the new TSS can be accessed and map it. Map only the required contents
2524 * and not the entire TSS.
2525 */
2526 void *pvNewTSS;
2527 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2528 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2529 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2530 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2531 * not perform correct translation if this happens. See Intel spec. 7.2.1
2532 * "Task-State Segment" */
2533 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2534 if (rcStrict != VINF_SUCCESS)
2535 {
2536 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2537 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2538 return rcStrict;
2539 }
2540
2541 /*
2542 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2543 */
2544 uint32_t u32EFlags = pCtx->eflags.u32;
2545 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2546 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2547 {
2548 PX86DESC pDescCurTSS;
2549 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2550 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2551 if (rcStrict != VINF_SUCCESS)
2552 {
2553 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2554 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2555 return rcStrict;
2556 }
2557
2558 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2559 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2560 if (rcStrict != VINF_SUCCESS)
2561 {
2562 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2563 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2564 return rcStrict;
2565 }
2566
2567 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2568 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2569 {
2570 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2571 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2572 u32EFlags &= ~X86_EFL_NT;
2573 }
2574 }
2575
2576 /*
2577 * Save the CPU state into the current TSS.
2578 */
2579 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2580 if (GCPtrNewTSS == GCPtrCurTSS)
2581 {
2582 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2583 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2584 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2585 }
2586 if (fIsNewTSS386)
2587 {
2588 /*
2589 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2590 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2591 */
2592 void *pvCurTSS32;
2593 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2594 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2595 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2596 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2597 if (rcStrict != VINF_SUCCESS)
2598 {
2599 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2600 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2601 return rcStrict;
2602 }
2603
2604 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2605 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2606 pCurTSS32->eip = uNextEip;
2607 pCurTSS32->eflags = u32EFlags;
2608 pCurTSS32->eax = pCtx->eax;
2609 pCurTSS32->ecx = pCtx->ecx;
2610 pCurTSS32->edx = pCtx->edx;
2611 pCurTSS32->ebx = pCtx->ebx;
2612 pCurTSS32->esp = pCtx->esp;
2613 pCurTSS32->ebp = pCtx->ebp;
2614 pCurTSS32->esi = pCtx->esi;
2615 pCurTSS32->edi = pCtx->edi;
2616 pCurTSS32->es = pCtx->es.Sel;
2617 pCurTSS32->cs = pCtx->cs.Sel;
2618 pCurTSS32->ss = pCtx->ss.Sel;
2619 pCurTSS32->ds = pCtx->ds.Sel;
2620 pCurTSS32->fs = pCtx->fs.Sel;
2621 pCurTSS32->gs = pCtx->gs.Sel;
2622
2623 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2624 if (rcStrict != VINF_SUCCESS)
2625 {
2626 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2627 VBOXSTRICTRC_VAL(rcStrict)));
2628 return rcStrict;
2629 }
2630 }
2631 else
2632 {
2633 /*
2634 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2635 */
2636 void *pvCurTSS16;
2637 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2638 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2639 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2640 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2641 if (rcStrict != VINF_SUCCESS)
2642 {
2643 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2644 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2645 return rcStrict;
2646 }
2647
2648 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2649 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2650 pCurTSS16->ip = uNextEip;
2651 pCurTSS16->flags = u32EFlags;
2652 pCurTSS16->ax = pCtx->ax;
2653 pCurTSS16->cx = pCtx->cx;
2654 pCurTSS16->dx = pCtx->dx;
2655 pCurTSS16->bx = pCtx->bx;
2656 pCurTSS16->sp = pCtx->sp;
2657 pCurTSS16->bp = pCtx->bp;
2658 pCurTSS16->si = pCtx->si;
2659 pCurTSS16->di = pCtx->di;
2660 pCurTSS16->es = pCtx->es.Sel;
2661 pCurTSS16->cs = pCtx->cs.Sel;
2662 pCurTSS16->ss = pCtx->ss.Sel;
2663 pCurTSS16->ds = pCtx->ds.Sel;
2664
2665 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2666 if (rcStrict != VINF_SUCCESS)
2667 {
2668 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2669 VBOXSTRICTRC_VAL(rcStrict)));
2670 return rcStrict;
2671 }
2672 }
2673
2674 /*
2675 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2676 */
2677 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2678 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2679 {
2680 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2681 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2682 pNewTSS->selPrev = pCtx->tr.Sel;
2683 }
2684
2685 /*
2686 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2687 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2688 */
2689 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2690 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2691 bool fNewDebugTrap;
2692 if (fIsNewTSS386)
2693 {
2694 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2695 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2696 uNewEip = pNewTSS32->eip;
2697 uNewEflags = pNewTSS32->eflags;
2698 uNewEax = pNewTSS32->eax;
2699 uNewEcx = pNewTSS32->ecx;
2700 uNewEdx = pNewTSS32->edx;
2701 uNewEbx = pNewTSS32->ebx;
2702 uNewEsp = pNewTSS32->esp;
2703 uNewEbp = pNewTSS32->ebp;
2704 uNewEsi = pNewTSS32->esi;
2705 uNewEdi = pNewTSS32->edi;
2706 uNewES = pNewTSS32->es;
2707 uNewCS = pNewTSS32->cs;
2708 uNewSS = pNewTSS32->ss;
2709 uNewDS = pNewTSS32->ds;
2710 uNewFS = pNewTSS32->fs;
2711 uNewGS = pNewTSS32->gs;
2712 uNewLdt = pNewTSS32->selLdt;
2713 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2714 }
2715 else
2716 {
2717 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2718 uNewCr3 = 0;
2719 uNewEip = pNewTSS16->ip;
2720 uNewEflags = pNewTSS16->flags;
2721 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2722 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2723 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2724 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2725 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2726 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2727 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2728 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2729 uNewES = pNewTSS16->es;
2730 uNewCS = pNewTSS16->cs;
2731 uNewSS = pNewTSS16->ss;
2732 uNewDS = pNewTSS16->ds;
2733 uNewFS = 0;
2734 uNewGS = 0;
2735 uNewLdt = pNewTSS16->selLdt;
2736 fNewDebugTrap = false;
2737 }
2738
2739 if (GCPtrNewTSS == GCPtrCurTSS)
2740 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2741 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2742
2743 /*
2744 * We're done accessing the new TSS.
2745 */
2746 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2747 if (rcStrict != VINF_SUCCESS)
2748 {
2749 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2750 return rcStrict;
2751 }
2752
2753 /*
2754 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2755 */
2756 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2757 {
2758 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2759 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2760 if (rcStrict != VINF_SUCCESS)
2761 {
2762 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2763 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2764 return rcStrict;
2765 }
2766
2767 /* Check that the descriptor indicates the new TSS is available (not busy). */
2768 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2769 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2770 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2771
2772 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2773 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2774 if (rcStrict != VINF_SUCCESS)
2775 {
2776 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2777 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2778 return rcStrict;
2779 }
2780 }
2781
2782 /*
2783 * From this point on, we're technically in the new task. Exceptions raised from here on are
2784 * delivered after the task switch completes but before any instruction of the new task executes.
2785 */
2786 pCtx->tr.Sel = SelTSS;
2787 pCtx->tr.ValidSel = SelTSS;
2788 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2789 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2790 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2791 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2792 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2793
2794 /* Set the busy bit in TR. */
2795 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2796 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2797 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2798 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2799 {
2800 uNewEflags |= X86_EFL_NT;
2801 }
2802
2803 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2804 pCtx->cr0 |= X86_CR0_TS;
2805 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2806
2807 pCtx->eip = uNewEip;
2808 pCtx->eax = uNewEax;
2809 pCtx->ecx = uNewEcx;
2810 pCtx->edx = uNewEdx;
2811 pCtx->ebx = uNewEbx;
2812 pCtx->esp = uNewEsp;
2813 pCtx->ebp = uNewEbp;
2814 pCtx->esi = uNewEsi;
2815 pCtx->edi = uNewEdi;
2816
2817 uNewEflags &= X86_EFL_LIVE_MASK;
2818 uNewEflags |= X86_EFL_RA1_MASK;
2819 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2820
2821 /*
2822 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2823 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2824 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2825 */
2826 pCtx->es.Sel = uNewES;
2827 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2828 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2829
2830 pCtx->cs.Sel = uNewCS;
2831 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2832 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2833
2834 pCtx->ss.Sel = uNewSS;
2835 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2836 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2837
2838 pCtx->ds.Sel = uNewDS;
2839 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2840 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2841
2842 pCtx->fs.Sel = uNewFS;
2843 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2844 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2845
2846 pCtx->gs.Sel = uNewGS;
2847 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2848 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2849 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2850
2851 pCtx->ldtr.Sel = uNewLdt;
2852 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2853 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2854 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2855
2856 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2857 {
2858 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2859 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2860 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2861 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2862 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2863 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2864 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2865 }
2866
2867 /*
2868 * Switch CR3 for the new task.
2869 */
2870 if ( fIsNewTSS386
2871 && (pCtx->cr0 & X86_CR0_PG))
2872 {
2873 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2874 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2875 {
2876 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2877 AssertRCSuccessReturn(rc, rc);
2878 }
2879 else
2880 pCtx->cr3 = uNewCr3;
2881
2882 /* Inform PGM. */
2883 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2884 {
2885 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2886 AssertRCReturn(rc, rc);
2887 /* ignore informational status codes */
2888 }
2889 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2890 }
2891
2892 /*
2893 * Switch LDTR for the new task.
2894 */
2895 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2896 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2897 else
2898 {
2899 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2900
2901 IEMSELDESC DescNewLdt;
2902 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2903 if (rcStrict != VINF_SUCCESS)
2904 {
2905 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2906 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2907 return rcStrict;
2908 }
2909 if ( !DescNewLdt.Legacy.Gen.u1Present
2910 || DescNewLdt.Legacy.Gen.u1DescType
2911 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2912 {
2913 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2914 uNewLdt, DescNewLdt.Legacy.u));
2915 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2916 }
2917
2918 pCtx->ldtr.ValidSel = uNewLdt;
2919 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2920 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2921 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2922 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2923 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2924 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2925 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2926 }
2927
2928 IEMSELDESC DescSS;
2929 if (IEM_IS_V86_MODE(pIemCpu))
2930 {
2931 pIemCpu->uCpl = 3;
2932 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2933 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2934 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2935 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2936 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2937 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2938 }
2939 else
2940 {
2941 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2942
2943 /*
2944 * Load the stack segment for the new task.
2945 */
2946 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2947 {
2948 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2949 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2950 }
2951
2952 /* Fetch the descriptor. */
2953 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2954 if (rcStrict != VINF_SUCCESS)
2955 {
2956 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2957 VBOXSTRICTRC_VAL(rcStrict)));
2958 return rcStrict;
2959 }
2960
2961 /* SS must be a data segment and writable. */
2962 if ( !DescSS.Legacy.Gen.u1DescType
2963 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2964 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2965 {
2966 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2967 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2968 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2969 }
2970
2971 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2972 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2973 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2974 {
2975 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2976 uNewCpl));
2977 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2978 }
2979
2980 /* Is it there? */
2981 if (!DescSS.Legacy.Gen.u1Present)
2982 {
2983 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2984 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2985 }
2986
2987 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2988 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2989
2990 /* Set the accessed bit before committing the result into SS. */
2991 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2992 {
2993 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2994 if (rcStrict != VINF_SUCCESS)
2995 return rcStrict;
2996 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2997 }
2998
2999 /* Commit SS. */
3000 pCtx->ss.Sel = uNewSS;
3001 pCtx->ss.ValidSel = uNewSS;
3002 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3003 pCtx->ss.u32Limit = cbLimit;
3004 pCtx->ss.u64Base = u64Base;
3005 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3006 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3007
3008 /* CPL has changed, update IEM before loading rest of segments. */
3009 pIemCpu->uCpl = uNewCpl;
3010
3011 /*
3012 * Load the data segments for the new task.
3013 */
3014 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3015 if (rcStrict != VINF_SUCCESS)
3016 return rcStrict;
3017 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3018 if (rcStrict != VINF_SUCCESS)
3019 return rcStrict;
3020 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3021 if (rcStrict != VINF_SUCCESS)
3022 return rcStrict;
3023 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3024 if (rcStrict != VINF_SUCCESS)
3025 return rcStrict;
3026
3027 /*
3028 * Load the code segment for the new task.
3029 */
3030 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3031 {
3032 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3033 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3034 }
3035
3036 /* Fetch the descriptor. */
3037 IEMSELDESC DescCS;
3038 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3039 if (rcStrict != VINF_SUCCESS)
3040 {
3041 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3042 return rcStrict;
3043 }
3044
3045 /* CS must be a code segment. */
3046 if ( !DescCS.Legacy.Gen.u1DescType
3047 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3048 {
3049 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3050 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3051 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3052 }
3053
3054 /* For conforming CS, DPL must be less than or equal to the RPL. */
3055 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3056 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3057 {
3058 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3059 DescCS.Legacy.Gen.u2Dpl));
3060 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3061 }
3062
3063 /* For non-conforming CS, DPL must match RPL. */
3064 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3065 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3066 {
3067 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3068 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3069 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3070 }
3071
3072 /* Is it there? */
3073 if (!DescCS.Legacy.Gen.u1Present)
3074 {
3075 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3076 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3077 }
3078
3079 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3080 u64Base = X86DESC_BASE(&DescCS.Legacy);
3081
3082 /* Set the accessed bit before committing the result into CS. */
3083 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3084 {
3085 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3086 if (rcStrict != VINF_SUCCESS)
3087 return rcStrict;
3088 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3089 }
3090
3091 /* Commit CS. */
3092 pCtx->cs.Sel = uNewCS;
3093 pCtx->cs.ValidSel = uNewCS;
3094 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3095 pCtx->cs.u32Limit = cbLimit;
3096 pCtx->cs.u64Base = u64Base;
3097 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3098 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3099 }
3100
3101 /** @todo Debug trap. */
3102 if (fIsNewTSS386 && fNewDebugTrap)
3103 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3104
3105 /*
3106 * Construct the error code masks based on what caused this task switch.
3107 * See Intel Instruction reference for INT.
3108 */
3109 uint16_t uExt;
3110 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3111 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3112 {
3113 uExt = 1;
3114 }
3115 else
3116 uExt = 0;
3117
3118 /*
3119 * Push any error code on to the new stack.
3120 */
3121 if (fFlags & IEM_XCPT_FLAGS_ERR)
3122 {
3123 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3124 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3125 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
3126 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
3127
3128 /* Check that there is sufficient space on the stack. */
3129 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3130 if ( pCtx->esp - 1 > cbLimitSS
3131 || pCtx->esp < cbStackFrame)
3132 {
3133 /** @todo Intel says #SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3134 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3135 cbStackFrame));
3136 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3137 }
3138
3139 if (fIsNewTSS386)
3140 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3141 else
3142 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3143 if (rcStrict != VINF_SUCCESS)
3144 {
3145 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3146 VBOXSTRICTRC_VAL(rcStrict)));
3147 return rcStrict;
3148 }
3149 }
3150
3151 /* Check the new EIP against the new CS limit. */
3152 if (pCtx->eip > pCtx->cs.u32Limit)
3153 {
3154        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3155 pCtx->eip, pCtx->cs.u32Limit));
3156 /** @todo Intel says #GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3157 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3158 }
3159
3160 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3161 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3162}
3163
3164
3165/**
3166 * Implements exceptions and interrupts for protected mode.
3167 *
3168 * @returns VBox strict status code.
3169 * @param pIemCpu The IEM per CPU instance data.
3170 * @param pCtx The CPU context.
3171 * @param cbInstr The number of bytes to offset rIP by in the return
3172 * address.
3173 * @param u8Vector The interrupt / exception vector number.
3174 * @param fFlags The flags.
3175 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3176 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3177 */
3178static VBOXSTRICTRC
3179iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3180 PCPUMCTX pCtx,
3181 uint8_t cbInstr,
3182 uint8_t u8Vector,
3183 uint32_t fFlags,
3184 uint16_t uErr,
3185 uint64_t uCr2)
3186{
3187 NOREF(cbInstr);
3188
3189 /*
3190 * Read the IDT entry.
3191 */
3192 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3193 {
3194 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3195 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3196 }
3197 X86DESC Idte;
3198 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3199 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3200 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3201 return rcStrict;
3202 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3203 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3204 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3205
3206 /*
3207 * Check the descriptor type, DPL and such.
3208 * ASSUMES this is done in the same order as described for call-gate calls.
3209 */
3210 if (Idte.Gate.u1DescType)
3211 {
3212 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3213 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3214 }
3215 bool fTaskGate = false;
3216 uint8_t f32BitGate = true;
3217 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
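    /* Note: TF, NT, RF and VM are cleared for both interrupt and trap gates.
       IF is added to the mask for interrupt gates only (see the switch below),
       which is the architectural difference between the two gate types. */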
3218 switch (Idte.Gate.u4Type)
3219 {
3220 case X86_SEL_TYPE_SYS_UNDEFINED:
3221 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3222 case X86_SEL_TYPE_SYS_LDT:
3223 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3224 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3225 case X86_SEL_TYPE_SYS_UNDEFINED2:
3226 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3227 case X86_SEL_TYPE_SYS_UNDEFINED3:
3228 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3229 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3230 case X86_SEL_TYPE_SYS_UNDEFINED4:
3231 {
3232 /** @todo check what actually happens when the type is wrong...
3233 * esp. call gates. */
3234 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3235 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3236 }
3237
3238 case X86_SEL_TYPE_SYS_286_INT_GATE:
3239            f32BitGate = false; /* fall thru */
3240 case X86_SEL_TYPE_SYS_386_INT_GATE:
3241 fEflToClear |= X86_EFL_IF;
3242 break;
3243
3244 case X86_SEL_TYPE_SYS_TASK_GATE:
3245 fTaskGate = true;
3246#ifndef IEM_IMPLEMENTS_TASKSWITCH
3247 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3248#endif
3249 break;
3250
3251 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3252            f32BitGate = false; /* fall thru */
3253 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3254 break;
3255
3256 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3257 }
3258
3259 /* Check DPL against CPL if applicable. */
3260 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3261 {
3262 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3263 {
3264 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3265 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3266 }
3267 }
3268
3269 /* Is it there? */
3270 if (!Idte.Gate.u1Present)
3271 {
3272 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3273 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3274 }
3275
3276 /* Is it a task-gate? */
3277 if (fTaskGate)
3278 {
3279 /*
3280 * Construct the error code masks based on what caused this task switch.
3281 * See Intel Instruction reference for INT.
3282 */
3283 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3284 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3285 RTSEL SelTSS = Idte.Gate.u16Sel;
3286
3287 /*
3288 * Fetch the TSS descriptor in the GDT.
3289 */
3290 IEMSELDESC DescTSS;
3291 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3292 if (rcStrict != VINF_SUCCESS)
3293 {
3294 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3295 VBOXSTRICTRC_VAL(rcStrict)));
3296 return rcStrict;
3297 }
3298
3299 /* The TSS descriptor must be a system segment and be available (not busy). */
3300 if ( DescTSS.Legacy.Gen.u1DescType
3301 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3302 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3303 {
3304 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3305 u8Vector, SelTSS, DescTSS.Legacy.au64));
3306 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3307 }
3308
3309 /* The TSS must be present. */
3310 if (!DescTSS.Legacy.Gen.u1Present)
3311 {
3312 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3313 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3314 }
3315
3316 /* Do the actual task switch. */
3317 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3318 }
3319
3320 /* A null CS is bad. */
3321 RTSEL NewCS = Idte.Gate.u16Sel;
3322 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3323 {
3324 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3325 return iemRaiseGeneralProtectionFault0(pIemCpu);
3326 }
3327
3328 /* Fetch the descriptor for the new CS. */
3329 IEMSELDESC DescCS;
3330 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3331 if (rcStrict != VINF_SUCCESS)
3332 {
3333 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3334 return rcStrict;
3335 }
3336
3337 /* Must be a code segment. */
3338 if (!DescCS.Legacy.Gen.u1DescType)
3339 {
3340 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3341 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3342 }
3343 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3344 {
3345 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3346 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3347 }
3348
3349 /* Don't allow lowering the privilege level. */
3350 /** @todo Does the lowering of privileges apply to software interrupts
3351 * only? This has bearings on the more-privileged or
3352 * same-privilege stack behavior further down. A testcase would
3353 * be nice. */
3354 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3355 {
3356 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3357 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3358 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3359 }
3360
3361 /* Make sure the selector is present. */
3362 if (!DescCS.Legacy.Gen.u1Present)
3363 {
3364 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3365 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3366 }
3367
3368 /* Check the new EIP against the new CS limit. */
3369 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3370 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3371 ? Idte.Gate.u16OffsetLow
3372 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3373 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3374 if (uNewEip > cbLimitCS)
3375 {
3376 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3377 u8Vector, uNewEip, cbLimitCS, NewCS));
3378 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3379 }
3380
3381 /* Calc the flag image to push. */
3382 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3383 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3384 fEfl &= ~X86_EFL_RF;
3385 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3386 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3387
3388 /* From V8086 mode only go to CPL 0. */
3389 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3390 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3391 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3392 {
3393 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3394 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3395 }
3396
3397 /*
3398 * If the privilege level changes, we need to get a new stack from the TSS.
3399     * This in turn means validating the new SS and ESP...
3400 */
3401 if (uNewCpl != pIemCpu->uCpl)
3402 {
3403 RTSEL NewSS;
3404 uint32_t uNewEsp;
3405 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3406 if (rcStrict != VINF_SUCCESS)
3407 return rcStrict;
3408
3409 IEMSELDESC DescSS;
3410 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3411 if (rcStrict != VINF_SUCCESS)
3412 return rcStrict;
3413
3414 /* Check that there is sufficient space for the stack frame. */
3415 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3416 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
3417 {
3418 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
3419 }
3420
3421 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3422 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3423 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
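        /* Rough sketch of the frame sized above, lowest address first, each
           slot being 2 bytes for a 286 gate and 4 bytes for a 386 gate:
               [error code,] EIP, CS, EFLAGS, ESP, SS
           and when interrupting V8086 code additionally: ES, DS, FS, GS. */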
3424 if ( uNewEsp - 1 > cbLimitSS
3425 || uNewEsp < cbStackFrame)
3426 {
3427 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3428 u8Vector, NewSS, uNewEsp, cbStackFrame));
3429 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3430 }
3431
3432 /*
3433 * Start making changes.
3434 */
3435
3436 /* Create the stack frame. */
3437 RTPTRUNION uStackFrame;
3438 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3439 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3440 if (rcStrict != VINF_SUCCESS)
3441 return rcStrict;
3442 void * const pvStackFrame = uStackFrame.pv;
3443 if (f32BitGate)
3444 {
3445 if (fFlags & IEM_XCPT_FLAGS_ERR)
3446 *uStackFrame.pu32++ = uErr;
3447 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3448 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3449 uStackFrame.pu32[2] = fEfl;
3450 uStackFrame.pu32[3] = pCtx->esp;
3451 uStackFrame.pu32[4] = pCtx->ss.Sel;
3452 if (fEfl & X86_EFL_VM)
3453 {
3454 uStackFrame.pu32[1] = pCtx->cs.Sel;
3455 uStackFrame.pu32[5] = pCtx->es.Sel;
3456 uStackFrame.pu32[6] = pCtx->ds.Sel;
3457 uStackFrame.pu32[7] = pCtx->fs.Sel;
3458 uStackFrame.pu32[8] = pCtx->gs.Sel;
3459 }
3460 }
3461 else
3462 {
3463 if (fFlags & IEM_XCPT_FLAGS_ERR)
3464 *uStackFrame.pu16++ = uErr;
3465 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3466 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3467 uStackFrame.pu16[2] = fEfl;
3468 uStackFrame.pu16[3] = pCtx->sp;
3469 uStackFrame.pu16[4] = pCtx->ss.Sel;
3470 if (fEfl & X86_EFL_VM)
3471 {
3472 uStackFrame.pu16[1] = pCtx->cs.Sel;
3473 uStackFrame.pu16[5] = pCtx->es.Sel;
3474 uStackFrame.pu16[6] = pCtx->ds.Sel;
3475 uStackFrame.pu16[7] = pCtx->fs.Sel;
3476 uStackFrame.pu16[8] = pCtx->gs.Sel;
3477 }
3478 }
3479 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3480 if (rcStrict != VINF_SUCCESS)
3481 return rcStrict;
3482
3483 /* Mark the selectors 'accessed' (hope this is the correct time). */
3484        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3485 * after pushing the stack frame? (Write protect the gdt + stack to
3486 * find out.) */
3487 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3488 {
3489 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3490 if (rcStrict != VINF_SUCCESS)
3491 return rcStrict;
3492 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3493 }
3494
3495 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3496 {
3497 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3498 if (rcStrict != VINF_SUCCESS)
3499 return rcStrict;
3500 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3501 }
3502
3503 /*
3504         * Start committing the register changes (joins with the DPL=CPL branch).
3505 */
3506 pCtx->ss.Sel = NewSS;
3507 pCtx->ss.ValidSel = NewSS;
3508 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3509 pCtx->ss.u32Limit = cbLimitSS;
3510 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3511 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3512 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3513 pIemCpu->uCpl = uNewCpl;
3514
3515 if (fEfl & X86_EFL_VM)
3516 {
3517 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3518 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3519 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3520 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3521 }
3522 }
3523 /*
3524 * Same privilege, no stack change and smaller stack frame.
3525 */
3526 else
3527 {
3528 uint64_t uNewRsp;
3529 RTPTRUNION uStackFrame;
3530 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
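        /* Same privilege frame: just [error code,] EIP, CS and EFLAGS; no
           SS:ESP since the stack does not change. */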
3531 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3532 if (rcStrict != VINF_SUCCESS)
3533 return rcStrict;
3534 void * const pvStackFrame = uStackFrame.pv;
3535
3536 if (f32BitGate)
3537 {
3538 if (fFlags & IEM_XCPT_FLAGS_ERR)
3539 *uStackFrame.pu32++ = uErr;
3540 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3541 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3542 uStackFrame.pu32[2] = fEfl;
3543 }
3544 else
3545 {
3546 if (fFlags & IEM_XCPT_FLAGS_ERR)
3547 *uStackFrame.pu16++ = uErr;
3548 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3549 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3550 uStackFrame.pu16[2] = fEfl;
3551 }
3552        rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use iemMemStackPushCommitSpecial here - RSP is committed below. */
3553 if (rcStrict != VINF_SUCCESS)
3554 return rcStrict;
3555
3556 /* Mark the CS selector as 'accessed'. */
3557 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3558 {
3559 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3560 if (rcStrict != VINF_SUCCESS)
3561 return rcStrict;
3562 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3563 }
3564
3565 /*
3566 * Start committing the register changes (joins with the other branch).
3567 */
3568 pCtx->rsp = uNewRsp;
3569 }
3570
3571 /* ... register committing continues. */
3572 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3573 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3574 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3575 pCtx->cs.u32Limit = cbLimitCS;
3576 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3577 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3578
3579 pCtx->rip = uNewEip;
3580 fEfl &= ~fEflToClear;
3581 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3582
3583 if (fFlags & IEM_XCPT_FLAGS_CR2)
3584 pCtx->cr2 = uCr2;
3585
3586 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3587 iemRaiseXcptAdjustState(pCtx, u8Vector);
3588
3589 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3590}
3591
3592
3593/**
3594 * Implements exceptions and interrupts for long mode.
3595 *
3596 * @returns VBox strict status code.
3597 * @param pIemCpu The IEM per CPU instance data.
3598 * @param pCtx The CPU context.
3599 * @param cbInstr The number of bytes to offset rIP by in the return
3600 * address.
3601 * @param u8Vector The interrupt / exception vector number.
3602 * @param fFlags The flags.
3603 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3604 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3605 */
3606static VBOXSTRICTRC
3607iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3608 PCPUMCTX pCtx,
3609 uint8_t cbInstr,
3610 uint8_t u8Vector,
3611 uint32_t fFlags,
3612 uint16_t uErr,
3613 uint64_t uCr2)
3614{
3615 NOREF(cbInstr);
3616
3617 /*
3618 * Read the IDT entry.
3619 */
3620 uint16_t offIdt = (uint16_t)u8Vector << 4;
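    /* Note: long mode IDT entries are 16 bytes each (the gate carries a full
       64-bit offset plus the IST field), hence the shift by 4 here and the two
       8-byte fetches below. */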
3621 if (pCtx->idtr.cbIdt < offIdt + 7)
3622 {
3623 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3624 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3625 }
3626 X86DESC64 Idte;
3627 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3628 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3629 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3630 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3631 return rcStrict;
3632 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3633 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3634 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3635
3636 /*
3637 * Check the descriptor type, DPL and such.
3638 * ASSUMES this is done in the same order as described for call-gate calls.
3639 */
3640 if (Idte.Gate.u1DescType)
3641 {
3642 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3643 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3644 }
3645 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3646 switch (Idte.Gate.u4Type)
3647 {
3648 case AMD64_SEL_TYPE_SYS_INT_GATE:
3649 fEflToClear |= X86_EFL_IF;
3650 break;
3651 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3652 break;
3653
3654 default:
3655 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3656 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3657 }
3658
3659 /* Check DPL against CPL if applicable. */
3660 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3661 {
3662 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3663 {
3664 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3665 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3666 }
3667 }
3668
3669 /* Is it there? */
3670 if (!Idte.Gate.u1Present)
3671 {
3672 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3673 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3674 }
3675
3676 /* A null CS is bad. */
3677 RTSEL NewCS = Idte.Gate.u16Sel;
3678 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3679 {
3680 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3681 return iemRaiseGeneralProtectionFault0(pIemCpu);
3682 }
3683
3684 /* Fetch the descriptor for the new CS. */
3685 IEMSELDESC DescCS;
3686 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3687 if (rcStrict != VINF_SUCCESS)
3688 {
3689 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3690 return rcStrict;
3691 }
3692
3693 /* Must be a 64-bit code segment. */
3694 if (!DescCS.Long.Gen.u1DescType)
3695 {
3696 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3697 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3698 }
3699 if ( !DescCS.Long.Gen.u1Long
3700 || DescCS.Long.Gen.u1DefBig
3701 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3702 {
3703 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3704 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3705 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3706 }
3707
3708 /* Don't allow lowering the privilege level. For non-conforming CS
3709 selectors, the CS.DPL sets the privilege level the trap/interrupt
3710 handler runs at. For conforming CS selectors, the CPL remains
3711 unchanged, but the CS.DPL must be <= CPL. */
3712 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3713 * when CPU in Ring-0. Result \#GP? */
3714 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3715 {
3716 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3717 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3718 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3719 }
3720
3721
3722 /* Make sure the selector is present. */
3723 if (!DescCS.Legacy.Gen.u1Present)
3724 {
3725 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3726 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3727 }
3728
3729 /* Check that the new RIP is canonical. */
3730 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3731 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3732 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3733 if (!IEM_IS_CANONICAL(uNewRip))
3734 {
3735 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3736 return iemRaiseGeneralProtectionFault0(pIemCpu);
3737 }
3738
3739 /*
3740 * If the privilege level changes or if the IST isn't zero, we need to get
3741 * a new stack from the TSS.
3742 */
3743 uint64_t uNewRsp;
3744 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3745 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3746 if ( uNewCpl != pIemCpu->uCpl
3747 || Idte.Gate.u3IST != 0)
3748 {
3749 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3750 if (rcStrict != VINF_SUCCESS)
3751 return rcStrict;
3752 }
3753 else
3754 uNewRsp = pCtx->rsp;
3755 uNewRsp &= ~(uint64_t)0xf;
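    /* The masking above mirrors the CPU aligning the new stack pointer down to
       a 16-byte boundary before pushing the interrupt frame in 64-bit mode. */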
3756
3757 /*
3758 * Calc the flag image to push.
3759 */
3760 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3761 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3762 fEfl &= ~X86_EFL_RF;
3763 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3764 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3765
3766 /*
3767 * Start making changes.
3768 */
3769
3770 /* Create the stack frame. */
3771 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3772 RTPTRUNION uStackFrame;
3773 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3774 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3775 if (rcStrict != VINF_SUCCESS)
3776 return rcStrict;
3777 void * const pvStackFrame = uStackFrame.pv;
3778
3779 if (fFlags & IEM_XCPT_FLAGS_ERR)
3780 *uStackFrame.pu64++ = uErr;
3781 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3782 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3783 uStackFrame.pu64[2] = fEfl;
3784 uStackFrame.pu64[3] = pCtx->rsp;
3785 uStackFrame.pu64[4] = pCtx->ss.Sel;
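    /* Note: unlike protected mode, 64-bit mode always pushes SS:RSP here, even
       when there is no privilege level change. */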
3786 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3787 if (rcStrict != VINF_SUCCESS)
3788 return rcStrict;
3789
3790    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3791    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3792 * after pushing the stack frame? (Write protect the gdt + stack to
3793 * find out.) */
3794 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3795 {
3796 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3797 if (rcStrict != VINF_SUCCESS)
3798 return rcStrict;
3799 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3800 }
3801
3802 /*
3803     * Start committing the register changes.
3804 */
3805 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3806 * hidden registers when interrupting 32-bit or 16-bit code! */
3807 if (uNewCpl != pIemCpu->uCpl)
3808 {
3809 pCtx->ss.Sel = 0 | uNewCpl;
3810 pCtx->ss.ValidSel = 0 | uNewCpl;
3811 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3812 pCtx->ss.u32Limit = UINT32_MAX;
3813 pCtx->ss.u64Base = 0;
3814 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3815 }
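    /* On a CPL change in long mode the CPU loads SS with a NULL selector whose
       RPL is the new CPL; the block above sets that up and marks the hidden
       parts as unusable. */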
3816 pCtx->rsp = uNewRsp - cbStackFrame;
3817 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3818 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3819 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3820 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3821 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3822 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3823 pCtx->rip = uNewRip;
3824 pIemCpu->uCpl = uNewCpl;
3825
3826 fEfl &= ~fEflToClear;
3827 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3828
3829 if (fFlags & IEM_XCPT_FLAGS_CR2)
3830 pCtx->cr2 = uCr2;
3831
3832 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3833 iemRaiseXcptAdjustState(pCtx, u8Vector);
3834
3835 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3836}
3837
3838
3839/**
3840 * Implements exceptions and interrupts.
3841 *
3842 * All exceptions and interrupts go thru this function!
3843 *
3844 * @returns VBox strict status code.
3845 * @param pIemCpu The IEM per CPU instance data.
3846 * @param cbInstr The number of bytes to offset rIP by in the return
3847 * address.
3848 * @param u8Vector The interrupt / exception vector number.
3849 * @param fFlags The flags.
3850 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3851 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3852 */
3853DECL_NO_INLINE(static, VBOXSTRICTRC)
3854iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3855 uint8_t cbInstr,
3856 uint8_t u8Vector,
3857 uint32_t fFlags,
3858 uint16_t uErr,
3859 uint64_t uCr2)
3860{
3861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3862
3863 /*
3864 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3865 */
3866 if ( pCtx->eflags.Bits.u1VM
3867 && pCtx->eflags.Bits.u2IOPL != 3
3868 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3869 && (pCtx->cr0 & X86_CR0_PE) )
3870 {
3871 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3872 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3873 u8Vector = X86_XCPT_GP;
3874 uErr = 0;
3875 }
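    /* The check above implements the V8086 rule that INT n requires IOPL=3;
       with a lower IOPL the instruction faults with #GP(0) instead of
       vectoring (the VME redirection bitmap is not considered here). */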
3876#ifdef DBGFTRACE_ENABLED
3877 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3878 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3879 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3880#endif
3881
3882 /*
3883 * Do recursion accounting.
3884 */
3885 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3886 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3887 if (pIemCpu->cXcptRecursions == 0)
3888 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3889 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3890 else
3891 {
3892 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3893 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3894
3895        /** @todo double and triple faults. */
3896 if (pIemCpu->cXcptRecursions >= 3)
3897 {
3898#ifdef DEBUG_bird
3899 AssertFailed();
3900#endif
3901 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3902 }
3903
3904 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3905 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3906 {
3907 ....
3908 } */
3909 }
3910 pIemCpu->cXcptRecursions++;
3911 pIemCpu->uCurXcpt = u8Vector;
3912 pIemCpu->fCurXcpt = fFlags;
3913
3914 /*
3915 * Extensive logging.
3916 */
3917#if defined(LOG_ENABLED) && defined(IN_RING3)
3918 if (LogIs3Enabled())
3919 {
3920 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3921 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3922 char szRegs[4096];
3923 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3924 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3925 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3926 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3927 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3928 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3929 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3930 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3931 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3932 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3933 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3934 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3935 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3936 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3937 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3938 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3939 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3940 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3941 " efer=%016VR{efer}\n"
3942 " pat=%016VR{pat}\n"
3943 " sf_mask=%016VR{sf_mask}\n"
3944 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3945 " lstar=%016VR{lstar}\n"
3946 " star=%016VR{star} cstar=%016VR{cstar}\n"
3947 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3948 );
3949
3950 char szInstr[256];
3951 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3952 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3953 szInstr, sizeof(szInstr), NULL);
3954 Log3(("%s%s\n", szRegs, szInstr));
3955 }
3956#endif /* LOG_ENABLED */
3957
3958 /*
3959 * Call the mode specific worker function.
3960 */
3961 VBOXSTRICTRC rcStrict;
3962 if (!(pCtx->cr0 & X86_CR0_PE))
3963 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3964 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
3965 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3966 else
3967 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3968
3969 /*
3970 * Unwind.
3971 */
3972 pIemCpu->cXcptRecursions--;
3973 pIemCpu->uCurXcpt = uPrevXcpt;
3974 pIemCpu->fCurXcpt = fPrevXcpt;
3975 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
3976 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
3977 return rcStrict;
3978}
3979
3980
3981/** \#DE - 00. */
3982DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
3983{
3984 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3985}
3986
3987
3988/** \#DB - 01.
3989 * @note This automatically clears DR7.GD. */
3990DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
3991{
3992 /** @todo set/clear RF. */
3993 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
3994 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3995}
3996
3997
3998/** \#UD - 06. */
3999DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4000{
4001 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4002}
4003
4004
4005/** \#NM - 07. */
4006DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4007{
4008 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4009}
4010
4011
4012/** \#TS(err) - 0a. */
4013DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4014{
4015 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4016}
4017
4018
4019/** \#TS(tr) - 0a. */
4020DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4021{
4022 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4023 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4024}
4025
4026
4027/** \#TS(0) - 0a. */
4028DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4029{
4030 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4031 0, 0);
4032}
4033
4034
4035/** \#TS(err) - 0a. */
4036DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4037{
4038 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4039 uSel & X86_SEL_MASK_OFF_RPL, 0);
4040}
4041
4042
4043/** \#NP(err) - 0b. */
4044DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4045{
4046 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4047}
4048
4049
4050/** \#NP(seg) - 0b. */
4051DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4052{
4053 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4054 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4055}
4056
4057
4058/** \#NP(sel) - 0b. */
4059DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4060{
4061 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4062 uSel & ~X86_SEL_RPL, 0);
4063}
4064
4065
4066/** \#SS(seg) - 0c. */
4067DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4068{
4069 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4070 uSel & ~X86_SEL_RPL, 0);
4071}
4072
4073
4074/** \#SS(err) - 0c. */
4075DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4076{
4077 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4078}
4079
4080
4081/** \#GP(n) - 0d. */
4082DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4083{
4084 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4085}
4086
4087
4088/** \#GP(0) - 0d. */
4089DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4090{
4091 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4092}
4093
4094
4095/** \#GP(sel) - 0d. */
4096DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4097{
4098 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4099 Sel & ~X86_SEL_RPL, 0);
4100}
4101
4102
4103/** \#GP(0) - 0d. */
4104DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4105{
4106 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4107}
4108
4109
4110/** \#GP(sel) - 0d. */
4111DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4112{
4113 NOREF(iSegReg); NOREF(fAccess);
4114 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4115 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4116}
4117
4118
4119/** \#GP(sel) - 0d. */
4120DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4121{
4122 NOREF(Sel);
4123 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4124}
4125
4126
4127/** \#GP(sel) - 0d. */
4128DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4129{
4130 NOREF(iSegReg); NOREF(fAccess);
4131 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4132}
4133
4134
4135/** \#PF(n) - 0e. */
4136DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4137{
4138 uint16_t uErr;
4139 switch (rc)
4140 {
4141 case VERR_PAGE_NOT_PRESENT:
4142 case VERR_PAGE_TABLE_NOT_PRESENT:
4143 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4144 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4145 uErr = 0;
4146 break;
4147
4148 default:
4149 AssertMsgFailed(("%Rrc\n", rc));
4150 case VERR_ACCESS_DENIED:
4151 uErr = X86_TRAP_PF_P;
4152 break;
4153
4154 /** @todo reserved */
4155 }
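    /* The remaining error code bits are filled in below: U/S for user mode
       (CPL=3) accesses, I/D for instruction fetches when NX paging is active,
       and W/R for write accesses.  The P bit was decided by the switch above. */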
4156
4157 if (pIemCpu->uCpl == 3)
4158 uErr |= X86_TRAP_PF_US;
4159
4160 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4161 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4162 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4163 uErr |= X86_TRAP_PF_ID;
4164
4165#if 0 /* This is so much non-sense, really. Why was it done like that? */
4166 /* Note! RW access callers reporting a WRITE protection fault, will clear
4167 the READ flag before calling. So, read-modify-write accesses (RW)
4168 can safely be reported as READ faults. */
4169 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4170 uErr |= X86_TRAP_PF_RW;
4171#else
4172 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4173 {
4174 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4175 uErr |= X86_TRAP_PF_RW;
4176 }
4177#endif
4178
4179 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4180 uErr, GCPtrWhere);
4181}
4182
4183
4184/** \#MF(0) - 10. */
4185DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4186{
4187 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4188}
4189
4190
4191/** \#AC(0) - 11. */
4192DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4193{
4194 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4195}
4196
4197
4198/**
4199 * Macro for calling iemCImplRaiseDivideError().
4200 *
4201 * This enables us to add/remove arguments and force different levels of
4202 * inlining as we wish.
4203 *
4204 * @return Strict VBox status code.
4205 */
4206#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4207IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4208{
4209 NOREF(cbInstr);
4210 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4211}
4212
4213
4214/**
4215 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4216 *
4217 * This enables us to add/remove arguments and force different levels of
4218 * inlining as we wish.
4219 *
4220 * @return Strict VBox status code.
4221 */
4222#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4223IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4224{
4225 NOREF(cbInstr);
4226 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4227}
4228
4229
4230/**
4231 * Macro for calling iemCImplRaiseInvalidOpcode().
4232 *
4233 * This enables us to add/remove arguments and force different levels of
4234 * inlining as we wish.
4235 *
4236 * @return Strict VBox status code.
4237 */
4238#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4239IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4240{
4241 NOREF(cbInstr);
4242 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4243}
4244
4245
4246/** @} */
4247
4248
4249/*
4250 *
4251 * Helper routines.
4252 * Helper routines.
4253 * Helper routines.
4254 *
4255 */
4256
4257/**
4258 * Recalculates the effective operand size.
4259 *
4260 * @param pIemCpu The IEM state.
4261 */
4262static void iemRecalEffOpSize(PIEMCPU pIemCpu)
4263{
4264 switch (pIemCpu->enmCpuMode)
4265 {
4266 case IEMMODE_16BIT:
4267 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4268 break;
4269 case IEMMODE_32BIT:
4270 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4271 break;
4272 case IEMMODE_64BIT:
4273 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4274 {
4275 case 0:
4276 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4277 break;
4278 case IEM_OP_PRF_SIZE_OP:
4279 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4280 break;
4281 case IEM_OP_PRF_SIZE_REX_W:
4282 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4283 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4284 break;
4285 }
4286 break;
4287 default:
4288 AssertFailed();
4289 }
4290}
4291
4292
4293/**
4294 * Sets the default operand size to 64-bit and recalculates the effective
4295 * operand size.
4296 *
4297 * @param pIemCpu The IEM state.
4298 */
4299static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4300{
4301 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4302 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
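    /* This is used by instructions whose operand size defaults to 64-bit in
       long mode (near branches, PUSH/POP and friends): REX.W is redundant for
       them and only a lone 0x66 prefix drops the effective size to 16-bit,
       which is what the check below reflects. */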
4303 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4304 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4305 else
4306 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4307}
4308
4309
4310/*
4311 *
4312 * Common opcode decoders.
4313 * Common opcode decoders.
4314 * Common opcode decoders.
4315 *
4316 */
4317//#include <iprt/mem.h>
4318
4319/**
4320 * Used to add extra details about a stub case.
4321 * @param pIemCpu The IEM per CPU state.
4322 */
4323static void iemOpStubMsg2(PIEMCPU pIemCpu)
4324{
4325#if defined(LOG_ENABLED) && defined(IN_RING3)
4326 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4327 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4328 char szRegs[4096];
4329 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4330 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4331 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4332 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4333 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4334 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4335 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4336 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4337 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4338 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4339 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4340 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4341 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4342 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4343 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4344 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4345 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4346 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4347 " efer=%016VR{efer}\n"
4348 " pat=%016VR{pat}\n"
4349 " sf_mask=%016VR{sf_mask}\n"
4350 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4351 " lstar=%016VR{lstar}\n"
4352 " star=%016VR{star} cstar=%016VR{cstar}\n"
4353 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4354 );
4355
4356 char szInstr[256];
4357 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4358 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4359 szInstr, sizeof(szInstr), NULL);
4360
4361 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4362#else
4363 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
4364#endif
4365}
4366
4367/**
4368 * Complains about a stub.
4369 *
4370 * Two versions of this macro are provided: one for daily use and one for use
4371 * when working on IEM.
4372 */
4373#if 0
4374# define IEMOP_BITCH_ABOUT_STUB() \
4375 do { \
4376 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4377 iemOpStubMsg2(pIemCpu); \
4378 RTAssertPanic(); \
4379 } while (0)
4380#else
4381# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4382#endif
4383
4384/** Stubs an opcode. */
4385#define FNIEMOP_STUB(a_Name) \
4386 FNIEMOP_DEF(a_Name) \
4387 { \
4388 IEMOP_BITCH_ABOUT_STUB(); \
4389 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4390 } \
4391 typedef int ignore_semicolon
4392
4393/** Stubs an opcode. */
4394#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4395 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4396 { \
4397 IEMOP_BITCH_ABOUT_STUB(); \
4398 NOREF(a_Name0); \
4399 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4400 } \
4401 typedef int ignore_semicolon
4402
4403/** Stubs an opcode which currently should raise \#UD. */
4404#define FNIEMOP_UD_STUB(a_Name) \
4405 FNIEMOP_DEF(a_Name) \
4406 { \
4407 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4408 return IEMOP_RAISE_INVALID_OPCODE(); \
4409 } \
4410 typedef int ignore_semicolon
4411
4412/** Stubs an opcode which currently should raise \#UD. */
4413#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4414 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4415 { \
4416 NOREF(a_Name0); \
4417 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4418 return IEMOP_RAISE_INVALID_OPCODE(); \
4419 } \
4420 typedef int ignore_semicolon
4421
4422
4423
4424/** @name Register Access.
4425 * @{
4426 */
4427
4428/**
4429 * Gets a reference (pointer) to the specified hidden segment register.
4430 *
4431 * @returns Hidden register reference.
4432 * @param pIemCpu The per CPU data.
4433 * @param iSegReg The segment register.
4434 */
4435static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4436{
4437 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4438 PCPUMSELREG pSReg;
4439 switch (iSegReg)
4440 {
4441 case X86_SREG_ES: pSReg = &pCtx->es; break;
4442 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4443 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4444 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4445 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4446 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4447 default:
4448 AssertFailedReturn(NULL);
4449 }
4450#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4451 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4452 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4453#else
4454 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4455#endif
4456 return pSReg;
4457}
4458
4459
4460/**
4461 * Gets a reference (pointer) to the specified segment register (the selector
4462 * value).
4463 *
4464 * @returns Pointer to the selector variable.
4465 * @param pIemCpu The per CPU data.
4466 * @param iSegReg The segment register.
4467 */
4468static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4469{
4470 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4471 switch (iSegReg)
4472 {
4473 case X86_SREG_ES: return &pCtx->es.Sel;
4474 case X86_SREG_CS: return &pCtx->cs.Sel;
4475 case X86_SREG_SS: return &pCtx->ss.Sel;
4476 case X86_SREG_DS: return &pCtx->ds.Sel;
4477 case X86_SREG_FS: return &pCtx->fs.Sel;
4478 case X86_SREG_GS: return &pCtx->gs.Sel;
4479 }
4480 AssertFailedReturn(NULL);
4481}
4482
4483
4484/**
4485 * Fetches the selector value of a segment register.
4486 *
4487 * @returns The selector value.
4488 * @param pIemCpu The per CPU data.
4489 * @param iSegReg The segment register.
4490 */
4491static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4492{
4493 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4494 switch (iSegReg)
4495 {
4496 case X86_SREG_ES: return pCtx->es.Sel;
4497 case X86_SREG_CS: return pCtx->cs.Sel;
4498 case X86_SREG_SS: return pCtx->ss.Sel;
4499 case X86_SREG_DS: return pCtx->ds.Sel;
4500 case X86_SREG_FS: return pCtx->fs.Sel;
4501 case X86_SREG_GS: return pCtx->gs.Sel;
4502 }
4503 AssertFailedReturn(0xffff);
4504}
4505
4506
4507/**
4508 * Gets a reference (pointer) to the specified general register.
4509 *
4510 * @returns Register reference.
4511 * @param pIemCpu The per CPU data.
4512 * @param iReg The general register.
4513 */
4514static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4515{
4516 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4517 switch (iReg)
4518 {
4519 case X86_GREG_xAX: return &pCtx->rax;
4520 case X86_GREG_xCX: return &pCtx->rcx;
4521 case X86_GREG_xDX: return &pCtx->rdx;
4522 case X86_GREG_xBX: return &pCtx->rbx;
4523 case X86_GREG_xSP: return &pCtx->rsp;
4524 case X86_GREG_xBP: return &pCtx->rbp;
4525 case X86_GREG_xSI: return &pCtx->rsi;
4526 case X86_GREG_xDI: return &pCtx->rdi;
4527 case X86_GREG_x8: return &pCtx->r8;
4528 case X86_GREG_x9: return &pCtx->r9;
4529 case X86_GREG_x10: return &pCtx->r10;
4530 case X86_GREG_x11: return &pCtx->r11;
4531 case X86_GREG_x12: return &pCtx->r12;
4532 case X86_GREG_x13: return &pCtx->r13;
4533 case X86_GREG_x14: return &pCtx->r14;
4534 case X86_GREG_x15: return &pCtx->r15;
4535 }
4536 AssertFailedReturn(NULL);
4537}
4538
4539
4540/**
4541 * Gets a reference (pointer) to the specified 8-bit general register.
4542 *
4543 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4544 *
4545 * @returns Register reference.
4546 * @param pIemCpu The per CPU data.
4547 * @param iReg The register.
4548 */
4549static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4550{
4551 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4552 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4553
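    /* Without a REX prefix, register encodings 4-7 select AH, CH, DH and BH,
       i.e. the second byte of rAX..rBX - hence the masking with 3 and the
       increment below (this relies on the little-endian layout of the
       context structure). */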
4554 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4555 if (iReg >= 4)
4556 pu8Reg++;
4557 return pu8Reg;
4558}
4559
4560
4561/**
4562 * Fetches the value of an 8-bit general register.
4563 *
4564 * @returns The register value.
4565 * @param pIemCpu The per CPU data.
4566 * @param iReg The register.
4567 */
4568static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4569{
4570 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4571 return *pbSrc;
4572}
4573
4574
4575/**
4576 * Fetches the value of a 16-bit general register.
4577 *
4578 * @returns The register value.
4579 * @param pIemCpu The per CPU data.
4580 * @param iReg The register.
4581 */
4582static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4583{
4584 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4585}
4586
4587
4588/**
4589 * Fetches the value of a 32-bit general register.
4590 *
4591 * @returns The register value.
4592 * @param pIemCpu The per CPU data.
4593 * @param iReg The register.
4594 */
4595static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4596{
4597 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4598}
4599
4600
4601/**
4602 * Fetches the value of a 64-bit general register.
4603 *
4604 * @returns The register value.
4605 * @param pIemCpu The per CPU data.
4606 * @param iReg The register.
4607 */
4608static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4609{
4610 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4611}
4612
4613
4614/**
4615 * Checks whether the FPU state is in FXSAVE format.
4616 *
4617 * @returns true if it is, false if it is in FNSAVE format.
4618 * @param   pIemCpu             The IEM per CPU data.
4619 */
4620DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
4621{
4622#ifdef RT_ARCH_AMD64
4623 NOREF(pIemCpu);
4624 return true;
4625#else
4626 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
4627 return true;
4628#endif
4629}
4630
4631
4632/**
4633 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4634 *
4635 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4636 * segment limit.
4637 *
4638 * @param pIemCpu The per CPU data.
4639 * @param offNextInstr The offset of the next instruction.
4640 */
4641static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4642{
4643 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
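    /* The displacement is relative to the end of the instruction; at this
       point offOpcode should hold the full length of the current instruction,
       so the target works out to IP + instruction length + offNextInstr. */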
4644 switch (pIemCpu->enmEffOpSize)
4645 {
4646 case IEMMODE_16BIT:
4647 {
4648 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4649 if ( uNewIp > pCtx->cs.u32Limit
4650 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4651 return iemRaiseGeneralProtectionFault0(pIemCpu);
4652 pCtx->rip = uNewIp;
4653 break;
4654 }
4655
4656 case IEMMODE_32BIT:
4657 {
4658 Assert(pCtx->rip <= UINT32_MAX);
4659 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4660
4661 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4662 if (uNewEip > pCtx->cs.u32Limit)
4663 return iemRaiseGeneralProtectionFault0(pIemCpu);
4664 pCtx->rip = uNewEip;
4665 break;
4666 }
4667
4668 case IEMMODE_64BIT:
4669 {
4670 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4671
4672 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4673 if (!IEM_IS_CANONICAL(uNewRip))
4674 return iemRaiseGeneralProtectionFault0(pIemCpu);
4675 pCtx->rip = uNewRip;
4676 break;
4677 }
4678
4679 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4680 }
4681
4682 pCtx->eflags.Bits.u1RF = 0;
4683 return VINF_SUCCESS;
4684}
4685
4686
4687/**
4688 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4689 *
4690 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4691 * segment limit.
4692 *
4693 * @returns Strict VBox status code.
4694 * @param pIemCpu The per CPU data.
4695 * @param offNextInstr The offset of the next instruction.
4696 */
4697static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4698{
4699 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4700 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4701
4702 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4703 if ( uNewIp > pCtx->cs.u32Limit
4704 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4705 return iemRaiseGeneralProtectionFault0(pIemCpu);
4706 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4707 pCtx->rip = uNewIp;
4708 pCtx->eflags.Bits.u1RF = 0;
4709
4710 return VINF_SUCCESS;
4711}
4712
4713
4714/**
4715 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4716 *
4717 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4718 * segment limit.
4719 *
4720 * @returns Strict VBox status code.
4721 * @param pIemCpu The per CPU data.
4722 * @param offNextInstr The offset of the next instruction.
4723 */
4724static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4725{
4726 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4727 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4728
4729 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4730 {
4731 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4732
4733 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4734 if (uNewEip > pCtx->cs.u32Limit)
4735 return iemRaiseGeneralProtectionFault0(pIemCpu);
4736 pCtx->rip = uNewEip;
4737 }
4738 else
4739 {
4740 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4741
4742 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4743 if (!IEM_IS_CANONICAL(uNewRip))
4744 return iemRaiseGeneralProtectionFault0(pIemCpu);
4745 pCtx->rip = uNewRip;
4746 }
4747 pCtx->eflags.Bits.u1RF = 0;
4748 return VINF_SUCCESS;
4749}
4750
4751
4752/**
4753 * Performs a near jump to the specified address.
4754 *
4755 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4756 * segment limit.
4757 *
4758 * @param pIemCpu The per CPU data.
4759 * @param uNewRip The new RIP value.
4760 */
4761static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4762{
4763 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4764 switch (pIemCpu->enmEffOpSize)
4765 {
4766 case IEMMODE_16BIT:
4767 {
4768 Assert(uNewRip <= UINT16_MAX);
4769 if ( uNewRip > pCtx->cs.u32Limit
4770 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4771 return iemRaiseGeneralProtectionFault0(pIemCpu);
4772 /** @todo Test 16-bit jump in 64-bit mode. */
4773 pCtx->rip = uNewRip;
4774 break;
4775 }
4776
4777 case IEMMODE_32BIT:
4778 {
4779 Assert(uNewRip <= UINT32_MAX);
4780 Assert(pCtx->rip <= UINT32_MAX);
4781 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4782
4783 if (uNewRip > pCtx->cs.u32Limit)
4784 return iemRaiseGeneralProtectionFault0(pIemCpu);
4785 pCtx->rip = uNewRip;
4786 break;
4787 }
4788
4789 case IEMMODE_64BIT:
4790 {
4791 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4792
4793 if (!IEM_IS_CANONICAL(uNewRip))
4794 return iemRaiseGeneralProtectionFault0(pIemCpu);
4795 pCtx->rip = uNewRip;
4796 break;
4797 }
4798
4799 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4800 }
4801
4802 pCtx->eflags.Bits.u1RF = 0;
4803 return VINF_SUCCESS;
4804}
4805
4806
4807/**
4808 * Gets the address of the top of the stack.
4809 *
 * @returns The current stack pointer (SP, ESP or RSP depending on the mode).
4810 * @param pIemCpu The per CPU data.
4811 * @param pCtx The CPU context which SP/ESP/RSP should be
4812 * read.
4813 */
4814DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4815{
4816 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4817 return pCtx->rsp;
4818 if (pCtx->ss.Attr.n.u1DefBig)
4819 return pCtx->esp;
4820 return pCtx->sp;
4821}
4822
4823
4824/**
4825 * Updates the RIP/EIP/IP to point to the next instruction.
4826 *
4827 * This function leaves the EFLAGS.RF flag alone.
4828 *
4829 * @param pIemCpu The per CPU data.
4830 * @param cbInstr The number of bytes to add.
4831 */
4832static void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4833{
4834 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4835 switch (pIemCpu->enmCpuMode)
4836 {
4837 case IEMMODE_16BIT:
4838 Assert(pCtx->rip <= UINT16_MAX);
4839 pCtx->eip += cbInstr;
4840 pCtx->eip &= UINT32_C(0xffff);
4841 break;
4842
4843 case IEMMODE_32BIT:
4844 pCtx->eip += cbInstr;
4845 Assert(pCtx->rip <= UINT32_MAX);
4846 break;
4847
4848 case IEMMODE_64BIT:
4849 pCtx->rip += cbInstr;
4850 break;
4851 default: AssertFailed();
4852 }
4853}
4854
4855
4856#if 0
4857/**
4858 * Updates the RIP/EIP/IP to point to the next instruction.
4859 *
4860 * @param pIemCpu The per CPU data.
4861 */
4862static void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4863{
4864 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4865}
4866#endif
4867
4868
4869
4870/**
4871 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4872 *
4873 * @param pIemCpu The per CPU data.
4874 * @param cbInstr The number of bytes to add.
4875 */
4876static void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4877{
4878 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4879
4880 pCtx->eflags.Bits.u1RF = 0;
4881
4882 switch (pIemCpu->enmCpuMode)
4883 {
4884 case IEMMODE_16BIT:
4885 Assert(pCtx->rip <= UINT16_MAX);
4886 pCtx->eip += cbInstr;
4887 pCtx->eip &= UINT32_C(0xffff);
4888 break;
4889
4890 case IEMMODE_32BIT:
4891 pCtx->eip += cbInstr;
4892 Assert(pCtx->rip <= UINT32_MAX);
4893 break;
4894
4895 case IEMMODE_64BIT:
4896 pCtx->rip += cbInstr;
4897 break;
4898 default: AssertFailed();
4899 }
4900}
4901
4902
4903/**
4904 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4905 *
4906 * @param pIemCpu The per CPU data.
4907 */
4908static void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4909{
4910 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4911}
4912
4913
4914/**
4915 * Adds to the stack pointer.
4916 *
4917 * @param pIemCpu The per CPU data.
4918 * @param pCtx The CPU context which SP/ESP/RSP should be
4919 * updated.
4920 * @param cbToAdd The number of bytes to add.
4921 */
4922DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4923{
4924 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4925 pCtx->rsp += cbToAdd;
4926 else if (pCtx->ss.Attr.n.u1DefBig)
4927 pCtx->esp += cbToAdd;
4928 else
4929 pCtx->sp += cbToAdd;
4930}
4931
4932
4933/**
4934 * Subtracts from the stack pointer.
4935 *
4936 * @param pIemCpu The per CPU data.
4937 * @param pCtx The CPU context which SP/ESP/RSP should be
4938 * updated.
4939 * @param cbToSub The number of bytes to subtract.
4940 */
4941DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4942{
4943 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4944 pCtx->rsp -= cbToSub;
4945 else if (pCtx->ss.Attr.n.u1DefBig)
4946 pCtx->esp -= cbToSub;
4947 else
4948 pCtx->sp -= cbToSub;
4949}
4950
4951
4952/**
4953 * Adds to the temporary stack pointer.
4954 *
4955 * @param pIemCpu The per CPU data.
4956 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4957 * @param cbToAdd The number of bytes to add.
4958 * @param pCtx Where to get the current stack mode.
4959 */
4960DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4961{
4962 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4963 pTmpRsp->u += cbToAdd;
4964 else if (pCtx->ss.Attr.n.u1DefBig)
4965 pTmpRsp->DWords.dw0 += cbToAdd;
4966 else
4967 pTmpRsp->Words.w0 += cbToAdd;
4968}
4969
4970
4971/**
4972 * Subtracts from the temporary stack pointer.
4973 *
4974 * @param pIemCpu The per CPU data.
4975 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4976 * @param cbToSub The number of bytes to subtract.
4977 * @param pCtx Where to get the current stack mode.
4978 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
4979 * expecting that.
4980 */
4981DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
4982{
4983 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4984 pTmpRsp->u -= cbToSub;
4985 else if (pCtx->ss.Attr.n.u1DefBig)
4986 pTmpRsp->DWords.dw0 -= cbToSub;
4987 else
4988 pTmpRsp->Words.w0 -= cbToSub;
4989}
4990
4991
4992/**
4993 * Calculates the effective stack address for a push of the specified size as
4994 * well as the new RSP value (upper bits may be masked).
4995 *
4996 * @returns Effective stack address for the push.
4997 * @param pIemCpu The IEM per CPU data.
4998 * @param pCtx Where to get the current stack mode.
4999 * @param cbItem The size of the stack item to push.
5000 * @param puNewRsp Where to return the new RSP value.
5001 */
5002DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5003{
5004 RTUINT64U uTmpRsp;
5005 RTGCPTR GCPtrTop;
5006 uTmpRsp.u = pCtx->rsp;
5007
5008 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5009 GCPtrTop = uTmpRsp.u -= cbItem;
5010 else if (pCtx->ss.Attr.n.u1DefBig)
5011 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5012 else
5013 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5014 *puNewRsp = uTmpRsp.u;
5015 return GCPtrTop;
5016}
5017
5018
5019/**
5020 * Gets the current stack pointer and calculates the value after a pop of the
5021 * specified size.
5022 *
5023 * @returns Current stack pointer.
5024 * @param pIemCpu The per CPU data.
5025 * @param pCtx Where to get the current stack mode.
5026 * @param cbItem The size of the stack item to pop.
5027 * @param puNewRsp Where to return the new RSP value.
5028 */
5029DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5030{
5031 RTUINT64U uTmpRsp;
5032 RTGCPTR GCPtrTop;
5033 uTmpRsp.u = pCtx->rsp;
5034
5035 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5036 {
5037 GCPtrTop = uTmpRsp.u;
5038 uTmpRsp.u += cbItem;
5039 }
5040 else if (pCtx->ss.Attr.n.u1DefBig)
5041 {
5042 GCPtrTop = uTmpRsp.DWords.dw0;
5043 uTmpRsp.DWords.dw0 += cbItem;
5044 }
5045 else
5046 {
5047 GCPtrTop = uTmpRsp.Words.w0;
5048 uTmpRsp.Words.w0 += cbItem;
5049 }
5050 *puNewRsp = uTmpRsp.u;
5051 return GCPtrTop;
5052}
5053
5054
5055/**
5056 * Calculates the effective stack address for a push of the specified size as
5057 * well as the new temporary RSP value (upper bits may be masked).
5058 *
5059 * @returns Effective stack address for the push.
5060 * @param pIemCpu The per CPU data.
 * @param pCtx Where to get the current stack mode.
5061 * @param pTmpRsp The temporary stack pointer. This is updated.
5062 * @param cbItem The size of the stack item to push.
5064 */
5065DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5066{
5067 RTGCPTR GCPtrTop;
5068
5069 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5070 GCPtrTop = pTmpRsp->u -= cbItem;
5071 else if (pCtx->ss.Attr.n.u1DefBig)
5072 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5073 else
5074 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5075 return GCPtrTop;
5076}
5077
5078
5079/**
5080 * Gets the effective stack address for a pop of the specified size and
5081 * calculates and updates the temporary RSP.
5082 *
5083 * @returns Current stack pointer.
5084 * @param pIemCpu The per CPU data.
5085 * @param pTmpRsp The temporary stack pointer. This is updated.
5086 * @param pCtx Where to get the current stack mode.
5087 * @param cbItem The size of the stack item to pop.
5088 */
5089DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5090{
5091 RTGCPTR GCPtrTop;
5092 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5093 {
5094 GCPtrTop = pTmpRsp->u;
5095 pTmpRsp->u += cbItem;
5096 }
5097 else if (pCtx->ss.Attr.n.u1DefBig)
5098 {
5099 GCPtrTop = pTmpRsp->DWords.dw0;
5100 pTmpRsp->DWords.dw0 += cbItem;
5101 }
5102 else
5103 {
5104 GCPtrTop = pTmpRsp->Words.w0;
5105 pTmpRsp->Words.w0 += cbItem;
5106 }
5107 return GCPtrTop;
5108}
5109
5110
5111/**
5112 * Checks if an Intel CPUID feature bit is set.
5113 *
5114 * @returns true / false.
5115 *
5116 * @param pIemCpu The IEM per CPU data.
5117 * @param fEdx The EDX bit to test, or 0 if ECX.
5118 * @param fEcx The ECX bit to test, or 0 if EDX.
5119 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
5120 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
5121 */
5122static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
5123{
5124 uint32_t uEax, uEbx, uEcx, uEdx;
5125 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
5126 return (fEcx && (uEcx & fEcx))
5127 || (fEdx && (uEdx & fEdx));
5128}
5129
5130
5131/**
5132 * Checks if an AMD CPUID feature bit is set.
5133 *
5134 * @returns true / false.
5135 *
5136 * @param pIemCpu The IEM per CPU data.
5137 * @param fEdx The EDX bit to test, or 0 if ECX.
5138 * @param fEcx The ECX bit to test, or 0 if EDX.
5139 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
5140 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
5141 */
5142static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
5143{
5144 uint32_t uEax, uEbx, uEcx, uEdx;
5145 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
5146 return (fEcx && (uEcx & fEcx))
5147 || (fEdx && (uEdx & fEdx));
5148}
5149
5150/** @} */
5151
5152
5153/** @name FPU access and helpers.
5154 *
5155 * @{
5156 */
5157
5158
5159/**
5160 * Hook for preparing to use the host FPU.
5161 *
5162 * This is necessary in ring-0 and raw-mode context.
5163 *
5164 * @param pIemCpu The IEM per CPU data.
5165 */
5166DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5167{
5168#ifdef IN_RING3
5169 NOREF(pIemCpu);
5170#else
5171/** @todo RZ: FIXME */
5172//# error "Implement me"
5173#endif
5174}
5175
5176
5177/**
5178 * Hook for preparing to use the host FPU for SSE.
5179 *
5180 * This is necessary in ring-0 and raw-mode context.
5181 *
5182 * @param pIemCpu The IEM per CPU data.
5183 */
5184DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5185{
5186 iemFpuPrepareUsage(pIemCpu);
5187}
5188
5189
5190/**
5191 * Stores a QNaN value into a FPU register.
5192 *
5193 * @param pReg Pointer to the register.
5194 */
5195DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5196{
5197 pReg->au32[0] = UINT32_C(0x00000000);
5198 pReg->au32[1] = UINT32_C(0xc0000000);
5199 pReg->au16[4] = UINT16_C(0xffff);
5200}
5201
5202
5203/**
5204 * Updates the FOP, FPU.CS and FPUIP registers.
5205 *
5206 * @param pIemCpu The IEM per CPU data.
5207 * @param pCtx The CPU context.
5208 */
5209DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
5210{
5211 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5212 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5213 /** @todo FPU.CS and FPUIP need to be kept separately. */
5214 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5215 {
5216 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5217 * happens in real mode here based on the fnsave and fnstenv images. */
5218 pCtx->fpu.CS = 0;
5219 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5220 }
5221 else
5222 {
5223 pCtx->fpu.CS = pCtx->cs.Sel;
5224 pCtx->fpu.FPUIP = pCtx->rip;
5225 }
5226}
5227
5228
5229/**
5230 * Updates the FPU.DS and FPUDP registers.
5231 *
5232 * @param pIemCpu The IEM per CPU data.
5233 * @param pCtx The CPU context.
5234 * @param iEffSeg The effective segment register.
5235 * @param GCPtrEff The effective address relative to @a iEffSeg.
5236 */
5237DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5238{
5239 RTSEL sel;
5240 switch (iEffSeg)
5241 {
5242 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5243 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5244 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5245 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5246 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5247 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5248 default:
5249 AssertMsgFailed(("%d\n", iEffSeg));
5250 sel = pCtx->ds.Sel;
5251 }
5252 /** @todo FPU.DS and FPUDP need to be kept separately. */
5253 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5254 {
5255 pCtx->fpu.DS = 0;
5256 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5257 }
5258 else
5259 {
5260 pCtx->fpu.DS = sel;
5261 pCtx->fpu.FPUDP = GCPtrEff;
5262 }
5263}
5264
5265
5266/**
5267 * Rotates the stack registers in the push direction.
5268 *
5269 * @param pCtx The CPU context.
5270 * @remarks This is a complete waste of time, but fxsave stores the registers in
5271 * stack order.
5272 */
5273DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
5274{
5275 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
5276 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
5277 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
5278 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
5279 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
5280 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
5281 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
5282 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
5283 pCtx->fpu.aRegs[0].r80 = r80Tmp;
5284}
5285
5286
5287/**
5288 * Rotates the stack registers in the pop direction.
5289 *
5290 * @param pCtx The CPU context.
5291 * @remarks This is a complete waste of time, but fxsave stores the registers in
5292 * stack order.
5293 */
5294DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
5295{
5296 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
5297 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
5298 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
5299 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
5300 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
5301 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
5302 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
5303 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
5304 pCtx->fpu.aRegs[7].r80 = r80Tmp;
5305}
5306
5307
5308/**
5309 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5310 * exception prevents it.
5311 *
5312 * @param pIemCpu The IEM per CPU data.
5313 * @param pResult The FPU operation result to push.
5314 * @param pCtx The CPU context.
5315 */
5316static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
5317{
5318 /* Update FSW and bail if there are pending exceptions afterwards. */
5319 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
5320 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5321 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5322 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5323 {
5324 pCtx->fpu.FSW = fFsw;
5325 return;
5326 }
5327
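/* A push decrements TOP; adding 7 modulo 8 to the 3-bit TOP field equals subtracting one. */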
5328 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5329 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
5330 {
5331 /* All is fine, push the actual value. */
5332 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5333 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
5334 }
5335 else if (pCtx->fpu.FCW & X86_FCW_IM)
5336 {
5337 /* Masked stack overflow, push QNaN. */
5338 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5339 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5340 }
5341 else
5342 {
5343 /* Raise stack overflow, don't push anything. */
5344 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5345 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5346 return;
5347 }
5348
5349 fFsw &= ~X86_FSW_TOP_MASK;
5350 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5351 pCtx->fpu.FSW = fFsw;
5352
5353 iemFpuRotateStackPush(pCtx);
5354}
5355
5356
5357/**
5358 * Stores a result in a FPU register and updates the FSW and FTW.
5359 *
5360 * @param pIemCpu The IEM per CPU data.
5361 * @param pResult The result to store.
5362 * @param iStReg Which FPU register to store it in.
5363 * @param pCtx The CPU context.
5364 */
5365static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
5366{
5367 Assert(iStReg < 8);
5368 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5369 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5370 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5371 pCtx->fpu.FTW |= RT_BIT(iReg);
5372 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
5373}
5374
5375
5376/**
5377 * Only updates the FPU status word (FSW) with the result of the current
5378 * instruction.
5379 *
5380 * @param pCtx The CPU context.
5381 * @param u16FSW The FSW output of the current instruction.
5382 */
5383static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
5384{
5385 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5386 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5387}
5388
5389
5390/**
5391 * Pops one item off the FPU stack if no pending exception prevents it.
5392 *
5393 * @param pCtx The CPU context.
5394 */
5395static void iemFpuMaybePopOne(PCPUMCTX pCtx)
5396{
5397 /* Check pending exceptions. */
5398 uint16_t uFSW = pCtx->fpu.FSW;
5399 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5400 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5401 return;
5402
5403 /* TOP++ (popping increments the 3-bit TOP field, wrapping modulo 8). */
5404 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5405 uFSW &= ~X86_FSW_TOP_MASK;
5406 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5407 pCtx->fpu.FSW = uFSW;
5408
5409 /* Mark the previous ST0 as empty. */
5410 iOldTop >>= X86_FSW_TOP_SHIFT;
5411 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
5412
5413 /* Rotate the registers. */
5414 iemFpuRotateStackPop(pCtx);
5415}
5416
5417
5418/**
5419 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5420 *
5421 * @param pIemCpu The IEM per CPU data.
5422 * @param pResult The FPU operation result to push.
5423 */
5424static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5425{
5426 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5427 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5428 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
5429}
5430
5431
5432/**
5433 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5434 * and sets FPUDP and FPUDS.
5435 *
5436 * @param pIemCpu The IEM per CPU data.
5437 * @param pResult The FPU operation result to push.
5438 * @param iEffSeg The effective segment register.
5439 * @param GCPtrEff The effective address relative to @a iEffSeg.
5440 */
5441static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5442{
5443 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5444 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5445 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5446 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
5447}
5448
5449
5450/**
5451 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5452 * unless a pending exception prevents it.
5453 *
5454 * @param pIemCpu The IEM per CPU data.
5455 * @param pResult The FPU operation result to store and push.
5456 */
5457static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5458{
5459 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5460 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5461
5462 /* Update FSW and bail if there are pending exceptions afterwards. */
5463 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
5464 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5465 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5466 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5467 {
5468 pCtx->fpu.FSW = fFsw;
5469 return;
5470 }
5471
5472 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5473 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
5474 {
5475 /* All is fine, push the actual value. */
5476 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5477 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
5478 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
5479 }
5480 else if (pCtx->fpu.FCW & X86_FCW_IM)
5481 {
5482 /* Masked stack overflow, push QNaN. */
5483 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5484 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
5485 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5486 }
5487 else
5488 {
5489 /* Raise stack overflow, don't push anything. */
5490 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5491 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5492 return;
5493 }
5494
5495 fFsw &= ~X86_FSW_TOP_MASK;
5496 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5497 pCtx->fpu.FSW = fFsw;
5498
5499 iemFpuRotateStackPush(pCtx);
5500}
5501
5502
5503/**
5504 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5505 * FOP.
5506 *
5507 * @param pIemCpu The IEM per CPU data.
5508 * @param pResult The result to store.
5509 * @param iStReg Which FPU register to store it in.
5511 */
5512static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5513{
5514 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5515 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5516 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5517}
5518
5519
5520/**
5521 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5522 * FOP, and then pops the stack.
5523 *
5524 * @param pIemCpu The IEM per CPU data.
5525 * @param pResult The result to store.
5526 * @param iStReg Which FPU register to store it in.
5528 */
5529static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5530{
5531 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5532 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5533 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5534 iemFpuMaybePopOne(pCtx);
5535}
5536
5537
5538/**
5539 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5540 * FPUDP, and FPUDS.
5541 *
5542 * @param pIemCpu The IEM per CPU data.
5543 * @param pResult The result to store.
5544 * @param iStReg Which FPU register to store it in.
5546 * @param iEffSeg The effective memory operand selector register.
5547 * @param GCPtrEff The effective memory operand offset.
5548 */
5549static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5550{
5551 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5552 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5553 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5554 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5555}
5556
5557
5558/**
5559 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5560 * FPUDP, and FPUDS, and then pops the stack.
5561 *
5562 * @param pIemCpu The IEM per CPU data.
5563 * @param pResult The result to store.
5564 * @param iStReg Which FPU register to store it in.
5566 * @param iEffSeg The effective memory operand selector register.
5567 * @param GCPtrEff The effective memory operand offset.
5568 */
5569static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5570 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5571{
5572 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5573 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5574 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5575 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
5576 iemFpuMaybePopOne(pCtx);
5577}
5578
5579
5580/**
5581 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5582 *
5583 * @param pIemCpu The IEM per CPU data.
5584 */
5585static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5586{
5587 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
5588}
5589
5590
5591/**
5592 * Marks the specified stack register as free (for FFREE).
5593 *
5594 * @param pIemCpu The IEM per CPU data.
5595 * @param iStReg The register to free.
5596 */
5597static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5598{
5599 Assert(iStReg < 8);
5600 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5601 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5602 pCtx->fpu.FTW &= ~RT_BIT(iReg);
5603}
5604
5605
5606/**
5607 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5608 *
5609 * @param pIemCpu The IEM per CPU data.
5610 */
5611static void iemFpuStackIncTop(PIEMCPU pIemCpu)
5612{
5613 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5614 uint16_t uFsw = pCtx->fpu.FSW;
5615 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5616 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5617 uFsw &= ~X86_FSW_TOP_MASK;
5618 uFsw |= uTop;
5619 pCtx->fpu.FSW = uFsw;
5620}
5621
5622
5623/**
5624 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5625 *
5626 * @param pIemCpu The IEM per CPU data.
5627 */
5628static void iemFpuStackDecTop(PIEMCPU pIemCpu)
5629{
5630 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5631 uint16_t uFsw = pCtx->fpu.FSW;
5632 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5633 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5634 uFsw &= ~X86_FSW_TOP_MASK;
5635 uFsw |= uTop;
5636 pCtx->fpu.FSW = uFsw;
5637}
5638
5639
5640/**
5641 * Updates the FSW, FOP, FPUIP, and FPUCS.
5642 *
5643 * @param pIemCpu The IEM per CPU data.
5644 * @param u16FSW The FSW from the current instruction.
5645 */
5646static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5647{
5648 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5649 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5650 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5651}
5652
5653
5654/**
5655 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5656 *
5657 * @param pIemCpu The IEM per CPU data.
5658 * @param u16FSW The FSW from the current instruction.
5659 */
5660static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5661{
5662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5663 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5664 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5665 iemFpuMaybePopOne(pCtx);
5666}
5667
5668
5669/**
5670 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5671 *
5672 * @param pIemCpu The IEM per CPU data.
5673 * @param u16FSW The FSW from the current instruction.
5674 * @param iEffSeg The effective memory operand selector register.
5675 * @param GCPtrEff The effective memory operand offset.
5676 */
5677static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5678{
5679 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5680 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5681 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5682 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5683}
5684
5685
5686/**
5687 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5688 *
5689 * @param pIemCpu The IEM per CPU data.
5690 * @param u16FSW The FSW from the current instruction.
5691 */
5692static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5693{
5694 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5695 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5696 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5697 iemFpuMaybePopOne(pCtx);
5698 iemFpuMaybePopOne(pCtx);
5699}
5700
5701
5702/**
5703 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5704 *
5705 * @param pIemCpu The IEM per CPU data.
5706 * @param u16FSW The FSW from the current instruction.
5707 * @param iEffSeg The effective memory operand selector register.
5708 * @param GCPtrEff The effective memory operand offset.
5709 */
5710static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5711{
5712 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5713 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5714 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5715 iemFpuUpdateFSWOnly(pCtx, u16FSW);
5716 iemFpuMaybePopOne(pCtx);
5717}
5718
5719
5720/**
5721 * Worker routine for raising an FPU stack underflow exception.
5722 *
5723 * @param pIemCpu The IEM per CPU data.
5724 * @param iStReg The stack register being accessed.
5725 * @param pCtx The CPU context.
5726 */
5727static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
5728{
5729 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5730 if (pCtx->fpu.FCW & X86_FCW_IM)
5731 {
5732 /* Masked underflow. */
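/* Note that C1 is left clear here to signal underflow; the overflow paths set C1 instead. */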
5733 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5734 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5735 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5736 if (iStReg != UINT8_MAX)
5737 {
5738 pCtx->fpu.FTW |= RT_BIT(iReg);
5739 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
5740 }
5741 }
5742 else
5743 {
5744 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5745 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5746 }
5747}
5748
5749
5750/**
5751 * Raises a FPU stack underflow exception.
5752 *
5753 * @param pIemCpu The IEM per CPU data.
5754 * @param iStReg The destination register that should be loaded
5755 * with QNaN if \#IS is not masked. Specify
5756 * UINT8_MAX if none (like for fcom).
5757 */
5758DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5759{
5760 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5761 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5762 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5763}
5764
5765
5766DECL_NO_INLINE(static, void)
5767iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5768{
5769 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5770 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5771 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5772 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5773}
5774
5775
5776DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5777{
5778 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5779 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5780 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5781 iemFpuMaybePopOne(pCtx);
5782}
5783
5784
5785DECL_NO_INLINE(static, void)
5786iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5787{
5788 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5789 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5790 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5791 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
5792 iemFpuMaybePopOne(pCtx);
5793}
5794
5795
5796DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5797{
5798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5799 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5800 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
5801 iemFpuMaybePopOne(pCtx);
5802 iemFpuMaybePopOne(pCtx);
5803}
5804
5805
5806DECL_NO_INLINE(static, void)
5807iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5808{
5809 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5810 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5811
5812 if (pCtx->fpu.FCW & X86_FCW_IM)
5813 {
5814 /* Masked underflow - Push QNaN. */
5815 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
5816 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5817 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5818 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5819 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5820 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5821 iemFpuRotateStackPush(pCtx);
5822 }
5823 else
5824 {
5825 /* Exception pending - don't change TOP or the register stack. */
5826 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5827 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5828 }
5829}
5830
5831
5832DECL_NO_INLINE(static, void)
5833iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5834{
5835 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5836 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5837
5838 if (pCtx->fpu.FCW & X86_FCW_IM)
5839 {
5840 /* Masked underflow - Push QNaN. */
5841 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
5842 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5843 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5844 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5845 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5846 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
5847 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5848 iemFpuRotateStackPush(pCtx);
5849 }
5850 else
5851 {
5852 /* Exception pending - don't change TOP or the register stack. */
5853 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5854 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5855 }
5856}
5857
5858
5859/**
5860 * Worker routine for raising an FPU stack overflow exception on a push.
5861 *
5862 * @param pIemCpu The IEM per CPU data.
5863 * @param pCtx The CPU context.
5864 */
5865static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
5866{
5867 if (pCtx->fpu.FCW & X86_FCW_IM)
5868 {
5869 /* Masked overflow. */
5870 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
5871 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5872 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5873 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5874 pCtx->fpu.FTW |= RT_BIT(iNewTop);
5875 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
5876 iemFpuRotateStackPush(pCtx);
5877 }
5878 else
5879 {
5880 /* Exception pending - don't change TOP or the register stack. */
5881 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
5882 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5883 }
5884}
5885
5886
5887/**
5888 * Raises a FPU stack overflow exception on a push.
5889 *
5890 * @param pIemCpu The IEM per CPU data.
5891 */
5892DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5893{
5894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5895 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5896 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
5897}
5898
5899
5900/**
5901 * Raises a FPU stack overflow exception on a push with a memory operand.
5902 *
5903 * @param pIemCpu The IEM per CPU data.
5904 * @param iEffSeg The effective memory operand selector register.
5905 * @param GCPtrEff The effective memory operand offset.
5906 */
5907DECL_NO_INLINE(static, void)
5908iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5909{
5910 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5911 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
5912 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5913 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
5914}
5915
5916
5917static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5918{
5919 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5920 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5921 if (pCtx->fpu.FTW & RT_BIT(iReg))
5922 return VINF_SUCCESS;
5923 return VERR_NOT_FOUND;
5924}
5925
5926
5927static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5928{
5929 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5930 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
5931 if (pCtx->fpu.FTW & RT_BIT(iReg))
5932 {
5933 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
5934 return VINF_SUCCESS;
5935 }
5936 return VERR_NOT_FOUND;
5937}
5938
5939
5940static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5941 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5942{
5943 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5944 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5945 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5946 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5947 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5948 {
5949 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
5950 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
5951 return VINF_SUCCESS;
5952 }
5953 return VERR_NOT_FOUND;
5954}
5955
5956
5957static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5958{
5959 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5960 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5961 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5962 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5963 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5964 {
5965 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
5966 return VINF_SUCCESS;
5967 }
5968 return VERR_NOT_FOUND;
5969}
5970
5971
5972/**
5973 * Updates the FPU exception status after FCW is changed.
5974 *
5975 * @param pCtx The CPU context.
5976 */
5977static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
5978{
5979 uint16_t u16Fsw = pCtx->fpu.FSW;
5980 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
5981 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5982 else
5983 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5984 pCtx->fpu.FSW = u16Fsw;
5985}
5986
5987
5988/**
5989 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
5990 *
5991 * @returns The full FTW.
5992 * @param pCtx The CPU state.
5993 */
5994static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
5995{
5996 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
5997 uint16_t u16Ftw = 0;
5998 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5999 for (unsigned iSt = 0; iSt < 8; iSt++)
6000 {
6001 unsigned const iReg = (iSt + iTop) & 7;
6002 if (!(u8Ftw & RT_BIT(iReg)))
6003 u16Ftw |= 3 << (iReg * 2); /* empty */
6004 else
6005 {
6006 uint16_t uTag;
6007 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
6008 if (pr80Reg->s.uExponent == 0x7fff)
6009 uTag = 2; /* Exponent is all 1's => Special. */
6010 else if (pr80Reg->s.uExponent == 0x0000)
6011 {
6012 if (pr80Reg->s.u64Mantissa == 0x0000)
6013 uTag = 1; /* All bits are zero => Zero. */
6014 else
6015 uTag = 2; /* Must be special. */
6016 }
6017 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6018 uTag = 0; /* Valid. */
6019 else
6020 uTag = 2; /* Must be special. */
6021
6022 u16Ftw |= uTag << (iReg * 2);
6023 }
6024 }
6025
6026 return u16Ftw;
6027}
6028
6029
6030/**
6031 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6032 *
6033 * @returns The compressed FTW.
6034 * @param u16FullFtw The full FTW to convert.
6035 */
6036static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6037{
6038 uint8_t u8Ftw = 0;
6039 for (unsigned i = 0; i < 8; i++)
6040 {
6041 if ((u16FullFtw & 3) != 3 /*empty*/)
6042 u8Ftw |= RT_BIT(i);
6043 u16FullFtw >>= 2;
6044 }
6045
6046 return u8Ftw;
6047}
6048
6049/** @} */
6050
6051
6052/** @name Memory access.
6053 *
6054 * @{
6055 */
6056
6057
6058/**
6059 * Updates the IEMCPU::cbWritten counter if applicable.
6060 *
6061 * @param pIemCpu The IEM per CPU data.
6062 * @param fAccess The access being accounted for.
6063 * @param cbMem The access size.
6064 */
6065DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6066{
6067 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6068 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6069 pIemCpu->cbWritten += (uint32_t)cbMem;
6070}
6071
6072
6073/**
6074 * Checks if the given segment can be written to, raising the appropriate
6075 * exception if not.
6076 *
6077 * @returns VBox strict status code.
6078 *
6079 * @param pIemCpu The IEM per CPU data.
6080 * @param pHid Pointer to the hidden register.
6081 * @param iSegReg The register number.
6082 * @param pu64BaseAddr Where to return the base address to use for the
6083 * segment. (In 64-bit code it may differ from the
6084 * base in the hidden segment.)
6085 */
6086static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6087{
6088 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6089 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6090 else
6091 {
6092 if (!pHid->Attr.n.u1Present)
6093 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6094
6095 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6096 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6097 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6098 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6099 *pu64BaseAddr = pHid->u64Base;
6100 }
6101 return VINF_SUCCESS;
6102}
6103
6104
6105/**
6106 * Checks if the given segment can be read from, raising the appropriate
6107 * exception if not.
6108 *
6109 * @returns VBox strict status code.
6110 *
6111 * @param pIemCpu The IEM per CPU data.
6112 * @param pHid Pointer to the hidden register.
6113 * @param iSegReg The register number.
6114 * @param pu64BaseAddr Where to return the base address to use for the
6115 * segment. (In 64-bit code it may differ from the
6116 * base in the hidden segment.)
6117 */
6118static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6119{
6120 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6121 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6122 else
6123 {
6124 if (!pHid->Attr.n.u1Present)
6125 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6126
6127 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6128 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6129 *pu64BaseAddr = pHid->u64Base;
6130 }
6131 return VINF_SUCCESS;
6132}
6133
6134
6135/**
6136 * Applies the segment limit, base and attributes.
6137 *
6138 * This may raise a \#GP or \#SS.
6139 *
6140 * @returns VBox strict status code.
6141 *
6142 * @param pIemCpu The IEM per CPU data.
6143 * @param fAccess The kind of access which is being performed.
6144 * @param iSegReg The index of the segment register to apply.
6145 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6146 * TSS, ++).
 * @param cbMem The size of the access, in bytes.
6147 * @param pGCPtrMem Pointer to the guest memory address to apply
6148 * segmentation to. Input and output parameter.
6149 */
6150static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
6151 size_t cbMem, PRTGCPTR pGCPtrMem)
6152{
6153 if (iSegReg == UINT8_MAX)
6154 return VINF_SUCCESS;
6155
6156 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6157 switch (pIemCpu->enmCpuMode)
6158 {
6159 case IEMMODE_16BIT:
6160 case IEMMODE_32BIT:
6161 {
6162 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6163 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6164
6165 Assert(pSel->Attr.n.u1Present);
6166 Assert(pSel->Attr.n.u1DescType);
6167 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6168 {
6169 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6170 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6171 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6172
6173 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6174 {
6175 /** @todo CPL check. */
6176 }
6177
6178 /*
6179 * There are two kinds of data selectors, normal and expand down.
6180 */
6181 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6182 {
6183 if ( GCPtrFirst32 > pSel->u32Limit
6184 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6185 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6186 }
6187 else
6188 {
6189 /*
6190 * The upper boundary is defined by the B bit, not the G bit!
6191 */
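/* I.e. the valid offset range for an expand-down segment is limit+1 up to
   0xffff (B=0) or 0xffffffff (B=1). */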
6192 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6193 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6194 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6195 }
6196 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6197 }
6198 else
6199 {
6200
6201 /*
6202 * A code selector can usually be used to read through; writing is
6203 * only permitted in real and V8086 mode.
6204 */
6205 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6206 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6207 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6208 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6209 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6210
6211 if ( GCPtrFirst32 > pSel->u32Limit
6212 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6213 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6214
6215 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6216 {
6217 /** @todo CPL check. */
6218 }
6219
6220 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6221 }
6222 return VINF_SUCCESS;
6223 }
6224
6225 case IEMMODE_64BIT:
6226 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6227 *pGCPtrMem += pSel->u64Base;
6228 return VINF_SUCCESS;
6229
6230 default:
6231 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
6232 }
6233}
6234
6235
6236/**
6237 * Translates a virtual address to a physical address and checks if we
6238 * can access the page as specified.
6239 *
6240 * @param pIemCpu The IEM per CPU data.
6241 * @param GCPtrMem The virtual address.
6242 * @param fAccess The intended access.
6243 * @param pGCPhysMem Where to return the physical address.
6244 */
6245static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
6246 PRTGCPHYS pGCPhysMem)
6247{
6248 /** @todo Need a different PGM interface here. We're currently using
6249 * generic / REM interfaces. this won't cut it for R0 & RC. */
6250 RTGCPHYS GCPhys;
6251 uint64_t fFlags;
6252 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6253 if (RT_FAILURE(rc))
6254 {
6255 /** @todo Check unassigned memory in unpaged mode. */
6256 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6257 *pGCPhysMem = NIL_RTGCPHYS;
6258 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6259 }
6260
6261 /* If the page is writable, user accessible and does not have the no-exec
6262 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
6263 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6264 {
6265 /* Write to read only memory? */
6266 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6267 && !(fFlags & X86_PTE_RW)
6268 && ( pIemCpu->uCpl != 0
6269 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6270 {
6271 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6272 *pGCPhysMem = NIL_RTGCPHYS;
6273 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6274 }
6275
6276 /* Kernel memory accessed by userland? */
6277 if ( !(fFlags & X86_PTE_US)
6278 && pIemCpu->uCpl == 3
6279 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6280 {
6281 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6282 *pGCPhysMem = NIL_RTGCPHYS;
6283 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6284 }
6285
6286 /* Executing non-executable memory? */
6287 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6288 && (fFlags & X86_PTE_PAE_NX)
6289 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6290 {
6291 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6292 *pGCPhysMem = NIL_RTGCPHYS;
6293 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6294 VERR_ACCESS_DENIED);
6295 }
6296 }
6297
6298 /*
6299 * Set the dirty / access flags.
6300 * ASSUMES this is set when the address is translated rather than on commit...
6301 */
6302 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6303 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6304 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6305 {
6306 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6307 AssertRC(rc2);
6308 }
6309
6310 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6311 *pGCPhysMem = GCPhys;
6312 return VINF_SUCCESS;
6313}
6314
6315
6316
6317/**
6318 * Maps a physical page.
6319 *
6320 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6321 * @param pIemCpu The IEM per CPU data.
6322 * @param GCPhysMem The physical address.
6323 * @param fAccess The intended access.
6324 * @param ppvMem Where to return the mapping address.
6325 * @param pLock The PGM lock.
6326 */
6327static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6328{
6329#ifdef IEM_VERIFICATION_MODE_FULL
6330 /* Force the alternative path so we can ignore writes. */
6331 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6332 {
6333 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6334 {
6335 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6336 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6337 if (RT_FAILURE(rc2))
6338 pIemCpu->fProblematicMemory = true;
6339 }
6340 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6341 }
6342#endif
6343#ifdef IEM_LOG_MEMORY_WRITES
6344 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6345 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6346#endif
6347#ifdef IEM_VERIFICATION_MODE_MINIMAL
6348 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6349#endif
6350
6351 /** @todo This API may require some improving later. A private deal with PGM
6352 * regarding locking and unlocking needs to be struck. A couple of TLBs
6353 * living in PGM, but with publicly accessible inlined access methods
6354 * could perhaps be an even better solution. */
6355 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6356 GCPhysMem,
6357 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6358 pIemCpu->fBypassHandlers,
6359 ppvMem,
6360 pLock);
6361 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6362 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6363
6364#ifdef IEM_VERIFICATION_MODE_FULL
6365 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6366 pIemCpu->fProblematicMemory = true;
6367#endif
6368 return rc;
6369}
6370
6371
6372/**
6373 * Unmap a page previously mapped by iemMemPageMap.
6374 *
6375 * @param pIemCpu The IEM per CPU data.
6376 * @param GCPhysMem The physical address.
6377 * @param fAccess The intended access.
6378 * @param pvMem What iemMemPageMap returned.
6379 * @param pLock The PGM lock.
6380 */
6381DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6382{
6383 NOREF(pIemCpu);
6384 NOREF(GCPhysMem);
6385 NOREF(fAccess);
6386 NOREF(pvMem);
6387 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6388}
6389
6390
6391/**
6392 * Looks up a memory mapping entry.
6393 *
6394 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6395 * @param pIemCpu The IEM per CPU data.
6396 * @param pvMem The memory address.
6397 * @param fAccess The access to match (type and what).
6398 */
6399DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6400{
6401 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6402 if ( pIemCpu->aMemMappings[0].pv == pvMem
6403 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6404 return 0;
6405 if ( pIemCpu->aMemMappings[1].pv == pvMem
6406 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6407 return 1;
6408 if ( pIemCpu->aMemMappings[2].pv == pvMem
6409 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6410 return 2;
6411 return VERR_NOT_FOUND;
6412}
6413
6414
6415/**
6416 * Finds a free memmap entry when using iNextMapping doesn't work.
6417 *
6418 * @returns Memory mapping index, 1024 on failure.
6419 * @param pIemCpu The IEM per CPU data.
6420 */
6421static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6422{
6423 /*
6424 * The easy case.
6425 */
6426 if (pIemCpu->cActiveMappings == 0)
6427 {
6428 pIemCpu->iNextMapping = 1;
6429 return 0;
6430 }
6431
6432 /* There should be enough mappings for all instructions. */
6433 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6434
6435 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6436 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6437 return i;
6438
6439 AssertFailedReturn(1024);
6440}
6441
6442
6443/**
6444 * Commits a bounce buffer that needs writing back and unmaps it.
6445 *
6446 * @returns Strict VBox status code.
6447 * @param pIemCpu The IEM per CPU data.
6448 * @param iMemMap The index of the buffer to commit.
6449 */
6450static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6451{
6452 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6453 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6454
6455 /*
6456 * Do the writing.
6457 */
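/* A bounce buffered write may span two physical pages: the first cbFirst bytes
   go to GCPhysFirst and the optional remainder (cbSecond) to GCPhysSecond. */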
6458 int rc;
6459#ifndef IEM_VERIFICATION_MODE_MINIMAL
6460 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6461 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6462 {
6463 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6464 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6465 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6466 if (!pIemCpu->fBypassHandlers)
6467 {
6468 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6469 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6470 pbBuf,
6471 cbFirst);
6472 if (cbSecond && rc == VINF_SUCCESS)
6473 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
6474 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6475 pbBuf + cbFirst,
6476 cbSecond);
6477 }
6478 else
6479 {
6480 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6481 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6482 pbBuf,
6483 cbFirst);
6484 if (cbSecond && rc == VINF_SUCCESS)
6485 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
6486 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6487 pbBuf + cbFirst,
6488 cbSecond);
6489 }
6490 if (rc != VINF_SUCCESS)
6491 {
6492 /** @todo status code handling */
6493 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6494 pIemCpu->fBypassHandlers ? "PGMPhysSimpleWriteGCPhys" : "PGMPhysWrite",
6495 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6496 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6497 }
6498 }
6499 else
6500#endif
6501 rc = VINF_SUCCESS;
6502
6503#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6504 /*
6505 * Record the write(s).
6506 */
6507 if (!pIemCpu->fNoRem)
6508 {
6509 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6510 if (pEvtRec)
6511 {
6512 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6513 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6514 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6515 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6516 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6517 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6518 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6519 }
6520 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6521 {
6522 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6523 if (pEvtRec)
6524 {
6525 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6526 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6527 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6528 memcpy(pEvtRec->u.RamWrite.ab,
6529 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6530 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6531 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6532 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6533 }
6534 }
6535 }
6536#endif
6537#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6538 if (rc == VINF_SUCCESS)
6539 {
6540 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6541 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6542 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6543 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6544 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6545 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6546
6547 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6548 g_cbIemWrote = cbWrote;
6549 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6550 }
6551#endif
6552
6553 /*
6554 * Free the mapping entry.
6555 */
6556 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6557 Assert(pIemCpu->cActiveMappings != 0);
6558 pIemCpu->cActiveMappings--;
6559 return rc;
6560}
6561
6562
6563/**
6564 * iemMemMap worker that deals with a request crossing pages.
6565 */
6566static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
6567 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6568{
6569 /*
6570 * Do the address translations.
6571 */
6572 RTGCPHYS GCPhysFirst;
6573 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6574 if (rcStrict != VINF_SUCCESS)
6575 return rcStrict;
6576
6577/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6578 * last byte. */
6579 RTGCPHYS GCPhysSecond;
6580 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6581 if (rcStrict != VINF_SUCCESS)
6582 return rcStrict;
6583 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6584
6585#ifdef IEM_VERIFICATION_MODE_FULL
6586 /*
6587 * Detect problematic memory when verifying so we can select
6588 * the right execution engine. (TLB: Redo this.)
6589 */
6590 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6591 {
6592 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysFirst,
6593 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6594 if (RT_SUCCESS(rc2))
6595 rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysSecond,
6596 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6597 if (RT_FAILURE(rc2))
6598 pIemCpu->fProblematicMemory = true;
6599 }
6600#endif
6601
6602
6603 /*
6604 * Read in the current memory content if it's a read, execute or partial
6605 * write access.
6606 */
6607 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6608 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6609 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
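    /* Illustrative example: a 4 byte access at page offset 0xffe yields
       cbFirstPage = 0x1000 - 0xffe = 2 and cbSecondPage = 4 - 2 = 2, i.e. the
       tail of the first page plus the head of the second. */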
6610
6611 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6612 {
6613 int rc;
6614 if (!pIemCpu->fBypassHandlers)
6615 {
6616 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
6617 if (rc != VINF_SUCCESS)
6618 {
6619 /** @todo status code handling */
6620 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6621 return rc;
6622 }
6623 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
6624 if (rc != VINF_SUCCESS)
6625 {
6626 /** @todo status code handling */
6627 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6628 return rc;
6629 }
6630 }
6631 else
6632 {
6633 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
6634 if (rc != VINF_SUCCESS)
6635 {
6636 /** @todo status code handling */
6637 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6638 return rc;
6639 }
6640 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6641 if (rc != VINF_SUCCESS)
6642 {
6643 /** @todo status code handling */
6644 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6645 return rc;
6646 }
6647 }
6648
6649#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6650 if ( !pIemCpu->fNoRem
6651 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6652 {
6653 /*
6654 * Record the reads.
6655 */
6656 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6657 if (pEvtRec)
6658 {
6659 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6660 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6661 pEvtRec->u.RamRead.cb = cbFirstPage;
6662 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6663 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6664 }
6665 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6666 if (pEvtRec)
6667 {
6668 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6669 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6670 pEvtRec->u.RamRead.cb = cbSecondPage;
6671 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6672 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6673 }
6674 }
6675#endif
6676 }
6677#ifdef VBOX_STRICT
6678 else
6679 memset(pbBuf, 0xcc, cbMem);
6680#endif
6681#ifdef VBOX_STRICT
6682 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6683 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6684#endif
6685
6686 /*
6687 * Commit the bounce buffer entry.
6688 */
6689 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6690 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6691 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6692 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6693 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6694 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6695 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6696 pIemCpu->iNextMapping = iMemMap + 1;
6697 pIemCpu->cActiveMappings++;
6698
6699 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6700 *ppvMem = pbBuf;
6701 return VINF_SUCCESS;
6702}
6703
6704
6705/**
6706 * iemMemMap worker that deals with iemMemPageMap failures.
6707 */
6708static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6709 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6710{
6711 /*
6712 * Filter out conditions we can handle and the ones which shouldn't happen.
6713 */
6714 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6715 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6716 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6717 {
6718 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
6719 return rcMap;
6720 }
6721 pIemCpu->cPotentialExits++;
6722
6723 /*
6724 * Read in the current memory content if it's a read, execute or partial
6725 * write access.
6726 */
6727 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6728 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6729 {
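        /* Unassigned physical memory has nothing to read; give the guest all ones (0xff)
           for that case instead (see the memset just below). */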
6730 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6731 memset(pbBuf, 0xff, cbMem);
6732 else
6733 {
6734 int rc;
6735 if (!pIemCpu->fBypassHandlers)
6736 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
6737 else
6738 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6739 if (rc != VINF_SUCCESS)
6740 {
6741 /** @todo status code handling */
6742 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6743 pIemCpu->fBypassHandlers ? "PGMPhysSimpleReadGCPhys" : "PGMPhysRead", GCPhysFirst, rc));
6744 return rc;
6745 }
6746 }
6747
6748#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6749 if ( !pIemCpu->fNoRem
6750 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6751 {
6752 /*
6753 * Record the read.
6754 */
6755 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6756 if (pEvtRec)
6757 {
6758 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6759 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6760 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6761 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6762 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6763 }
6764 }
6765#endif
6766 }
6767#ifdef VBOX_STRICT
6768 else
6769 memset(pbBuf, 0xcc, cbMem);
6770#endif
6771#ifdef VBOX_STRICT
6772 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6773 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6774#endif
6775
6776 /*
6777 * Commit the bounce buffer entry.
6778 */
6779 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6780 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6781 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6782 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6783 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6784 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6785 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6786 pIemCpu->iNextMapping = iMemMap + 1;
6787 pIemCpu->cActiveMappings++;
6788
6789 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6790 *ppvMem = pbBuf;
6791 return VINF_SUCCESS;
6792}
6793
6794
6795
6796/**
6797 * Maps the specified guest memory for the given kind of access.
6798 *
6799 * This may be using bounce buffering of the memory if it's crossing a page
6800 * boundary or if there is an access handler installed for any of it. Because
6801 * of lock prefix guarantees, we're in for some extra clutter when this
6802 * happens.
6803 *
6804 * This may raise a \#GP, \#SS, \#PF or \#AC.
6805 *
6806 * @returns VBox strict status code.
6807 *
6808 * @param pIemCpu The IEM per CPU data.
6809 * @param ppvMem Where to return the pointer to the mapped
6810 * memory.
6811 * @param cbMem The number of bytes to map. This is usually 1,
6812 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6813 * string operations it can be up to a page.
6814 * @param iSegReg The index of the segment register to use for
6815 * this access. The base and limits are checked.
6816 * Use UINT8_MAX to indicate that no segmentation
6817 * is required (for IDT, GDT and LDT accesses).
6818 * @param GCPtrMem The address of the guest memory.
6819 * @param fAccess How the memory is being accessed. The
6820 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6821 * how to map the memory, while the
6822 * IEM_ACCESS_WHAT_XXX bit is used when raising
6823 * exceptions.
6824 */
6825static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6826{
6827 /*
6828 * Check the input and figure out which mapping entry to use.
6829 */
6830 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6831 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6832
6833 unsigned iMemMap = pIemCpu->iNextMapping;
6834 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6835 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6836 {
6837 iMemMap = iemMemMapFindFree(pIemCpu);
6838 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
6839 }
6840
6841 /*
6842 * Map the memory, checking that we can actually access it. If something
6843 * slightly complicated happens, fall back on bounce buffering.
6844 */
6845 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6846 if (rcStrict != VINF_SUCCESS)
6847 return rcStrict;
6848
6849 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6850 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6851
6852 RTGCPHYS GCPhysFirst;
6853 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6854 if (rcStrict != VINF_SUCCESS)
6855 return rcStrict;
6856
6857 void *pvMem;
6858 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6859 if (rcStrict != VINF_SUCCESS)
6860 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6861
6862 /*
6863 * Fill in the mapping table entry.
6864 */
6865 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
6866 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
6867 pIemCpu->iNextMapping = iMemMap + 1;
6868 pIemCpu->cActiveMappings++;
6869
6870 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6871 *ppvMem = pvMem;
6872 return VINF_SUCCESS;
6873}
6874
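/* Illustrative usage sketch of the map/commit pattern implemented above (not part of the
 * original code): an instruction implementation maps a guest word for read-modify-write,
 * updates it, and then commits/unmaps so any bounce buffered data gets written back.
 * GCPtrEff and u16Mask are hypothetical locals of the caller, and IEM_ACCESS_DATA_RW is
 * assumed here for the read-modify-write operand.
 */
#if 0
    uint16_t    *pu16;
    VBOXSTRICTRC rcStrict2 = iemMemMap(pIemCpu, (void **)&pu16, sizeof(*pu16), X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_RW);
    if (rcStrict2 != VINF_SUCCESS)
        return rcStrict2;
    *pu16 |= u16Mask;   /* modify the directly mapped or bounce buffered guest memory */
    rcStrict2 = iemMemCommitAndUnmap(pIemCpu, pu16, IEM_ACCESS_DATA_RW);
    if (rcStrict2 != VINF_SUCCESS)
        return rcStrict2;
#endif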
6875
6876/**
6877 * Commits the guest memory if bounce buffered and unmaps it.
6878 *
6879 * @returns Strict VBox status code.
6880 * @param pIemCpu The IEM per CPU data.
6881 * @param pvMem The mapping.
6882 * @param fAccess The kind of access.
6883 */
6884static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6885{
6886 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
6887 AssertReturn(iMemMap >= 0, iMemMap);
6888
6889 /* If it's bounce buffered, we may need to write back the buffer. */
6890 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6891 {
6892 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6893 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
6894 }
6895 /* Otherwise unlock it. */
6896 else
6897 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6898
6899 /* Free the entry. */
6900 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6901 Assert(pIemCpu->cActiveMappings != 0);
6902 pIemCpu->cActiveMappings--;
6903 return VINF_SUCCESS;
6904}
6905
6906
6907/**
6908 * Rolls back mappings, releasing page locks and such.
6909 *
6910 * The caller shall only call this after checking cActiveMappings.
6911 *
6913 * @param pIemCpu The IEM per CPU data.
6914 */
6915static void iemMemRollback(PIEMCPU pIemCpu)
6916{
6917 Assert(pIemCpu->cActiveMappings > 0);
6918
6919 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
6920 while (iMemMap-- > 0)
6921 {
6922 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
6923 if (fAccess != IEM_ACCESS_INVALID)
6924 {
6925 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6926 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
6927 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6928 Assert(pIemCpu->cActiveMappings > 0);
6929 pIemCpu->cActiveMappings--;
6930 }
6931 }
6932}
6933
6934
6935/**
6936 * Fetches a data byte.
6937 *
6938 * @returns Strict VBox status code.
6939 * @param pIemCpu The IEM per CPU data.
6940 * @param pu8Dst Where to return the byte.
6941 * @param iSegReg The index of the segment register to use for
6942 * this access. The base and limits are checked.
6943 * @param GCPtrMem The address of the guest memory.
6944 */
6945static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6946{
6947 /* The lazy approach for now... */
6948 uint8_t const *pu8Src;
6949 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6950 if (rc == VINF_SUCCESS)
6951 {
6952 *pu8Dst = *pu8Src;
6953 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6954 }
6955 return rc;
6956}
6957
6958
6959/**
6960 * Fetches a data word.
6961 *
6962 * @returns Strict VBox status code.
6963 * @param pIemCpu The IEM per CPU data.
6964 * @param pu16Dst Where to return the word.
6965 * @param iSegReg The index of the segment register to use for
6966 * this access. The base and limits are checked.
6967 * @param GCPtrMem The address of the guest memory.
6968 */
6969static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6970{
6971 /* The lazy approach for now... */
6972 uint16_t const *pu16Src;
6973 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6974 if (rc == VINF_SUCCESS)
6975 {
6976 *pu16Dst = *pu16Src;
6977 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6978 }
6979 return rc;
6980}
6981
6982
6983/**
6984 * Fetches a data dword.
6985 *
6986 * @returns Strict VBox status code.
6987 * @param pIemCpu The IEM per CPU data.
6988 * @param pu32Dst Where to return the dword.
6989 * @param iSegReg The index of the segment register to use for
6990 * this access. The base and limits are checked.
6991 * @param GCPtrMem The address of the guest memory.
6992 */
6993static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6994{
6995 /* The lazy approach for now... */
6996 uint32_t const *pu32Src;
6997 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6998 if (rc == VINF_SUCCESS)
6999 {
7000 *pu32Dst = *pu32Src;
7001 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7002 }
7003 return rc;
7004}
7005
7006
7007#ifdef SOME_UNUSED_FUNCTION
7008/**
7009 * Fetches a data dword and sign extends it to a qword.
7010 *
7011 * @returns Strict VBox status code.
7012 * @param pIemCpu The IEM per CPU data.
7013 * @param pu64Dst Where to return the sign extended value.
7014 * @param iSegReg The index of the segment register to use for
7015 * this access. The base and limits are checked.
7016 * @param GCPtrMem The address of the guest memory.
7017 */
7018static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7019{
7020 /* The lazy approach for now... */
7021 int32_t const *pi32Src;
7022 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7023 if (rc == VINF_SUCCESS)
7024 {
7025 *pu64Dst = *pi32Src;
7026 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7027 }
7028#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7029 else
7030 *pu64Dst = 0;
7031#endif
7032 return rc;
7033}
7034#endif
7035
7036
7037/**
7038 * Fetches a data qword.
7039 *
7040 * @returns Strict VBox status code.
7041 * @param pIemCpu The IEM per CPU data.
7042 * @param pu64Dst Where to return the qword.
7043 * @param iSegReg The index of the segment register to use for
7044 * this access. The base and limits are checked.
7045 * @param GCPtrMem The address of the guest memory.
7046 */
7047static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7048{
7049 /* The lazy approach for now... */
7050 uint64_t const *pu64Src;
7051 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7052 if (rc == VINF_SUCCESS)
7053 {
7054 *pu64Dst = *pu64Src;
7055 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7056 }
7057 return rc;
7058}
7059
7060
7061/**
7062 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7063 *
7064 * @returns Strict VBox status code.
7065 * @param pIemCpu The IEM per CPU data.
7066 * @param pu64Dst Where to return the qword.
7067 * @param iSegReg The index of the segment register to use for
7068 * this access. The base and limits are checked.
7069 * @param GCPtrMem The address of the guest memory.
7070 */
7071static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7072{
7073 /* The lazy approach for now... */
7074 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7075 if (RT_UNLIKELY(GCPtrMem & 15))
7076 return iemRaiseGeneralProtectionFault0(pIemCpu);
7077
7078 uint64_t const *pu64Src;
7079 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7080 if (rc == VINF_SUCCESS)
7081 {
7082 *pu64Dst = *pu64Src;
7083 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7084 }
7085 return rc;
7086}
7087
7088
7089/**
7090 * Fetches a data tword.
7091 *
7092 * @returns Strict VBox status code.
7093 * @param pIemCpu The IEM per CPU data.
7094 * @param pr80Dst Where to return the tword.
7095 * @param iSegReg The index of the segment register to use for
7096 * this access. The base and limits are checked.
7097 * @param GCPtrMem The address of the guest memory.
7098 */
7099static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7100{
7101 /* The lazy approach for now... */
7102 PCRTFLOAT80U pr80Src;
7103 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7104 if (rc == VINF_SUCCESS)
7105 {
7106 *pr80Dst = *pr80Src;
7107 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7108 }
7109 return rc;
7110}
7111
7112
7113/**
7114 * Fetches a data dqword (double qword), generally SSE related.
7115 *
7116 * @returns Strict VBox status code.
7117 * @param pIemCpu The IEM per CPU data.
7118 * @param pu128Dst Where to return the dqword.
7119 * @param iSegReg The index of the segment register to use for
7120 * this access. The base and limits are checked.
7121 * @param GCPtrMem The address of the guest memory.
7122 */
7123static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7124{
7125 /* The lazy approach for now... */
7126 uint128_t const *pu128Src;
7127 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7128 if (rc == VINF_SUCCESS)
7129 {
7130 *pu128Dst = *pu128Src;
7131 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7132 }
7133 return rc;
7134}
7135
7136
7137/**
7138 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7139 * related.
7140 *
7141 * Raises \#GP(0) if not aligned.
7142 *
7143 * @returns Strict VBox status code.
7144 * @param pIemCpu The IEM per CPU data.
7145 * @param pu128Dst Where to return the dqword.
7146 * @param iSegReg The index of the segment register to use for
7147 * this access. The base and limits are checked.
7148 * @param GCPtrMem The address of the guest memory.
7149 */
7150static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7151{
7152 /* The lazy approach for now... */
7153 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7154 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7155 return iemRaiseGeneralProtectionFault0(pIemCpu);
7156
7157 uint128_t const *pu128Src;
7158 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7159 if (rc == VINF_SUCCESS)
7160 {
7161 *pu128Dst = *pu128Src;
7162 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7163 }
7164 return rc;
7165}
7166
7167
7168
7169
7170/**
7171 * Fetches a descriptor register (lgdt, lidt).
7172 *
7173 * @returns Strict VBox status code.
7174 * @param pIemCpu The IEM per CPU data.
7175 * @param pcbLimit Where to return the limit.
7176 * @param pGCPtrBase Where to return the base.
7177 * @param iSegReg The index of the segment register to use for
7178 * this access. The base and limits are checked.
7179 * @param GCPtrMem The address of the guest memory.
7180 * @param enmOpSize The effective operand size.
7181 */
7182static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
7183 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7184{
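    /* Memory operand layout: a 16-bit limit followed by a 24-bit, 32-bit or 64-bit base,
       depending on the effective operand size (hence the 2+3, 2+4 and 2+8 byte mappings below). */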
7185 uint8_t const *pu8Src;
7186 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7187 (void **)&pu8Src,
7188 enmOpSize == IEMMODE_64BIT
7189 ? 2 + 8
7190 : enmOpSize == IEMMODE_32BIT
7191 ? 2 + 4
7192 : 2 + 3,
7193 iSegReg,
7194 GCPtrMem,
7195 IEM_ACCESS_DATA_R);
7196 if (rcStrict == VINF_SUCCESS)
7197 {
7198 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7199 switch (enmOpSize)
7200 {
7201 case IEMMODE_16BIT:
7202 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7203 break;
7204 case IEMMODE_32BIT:
7205 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7206 break;
7207 case IEMMODE_64BIT:
7208 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7209 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7210 break;
7211
7212 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7213 }
7214 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7215 }
7216 return rcStrict;
7217}
7218
7219
7220
7221/**
7222 * Stores a data byte.
7223 *
7224 * @returns Strict VBox status code.
7225 * @param pIemCpu The IEM per CPU data.
7226 * @param iSegReg The index of the segment register to use for
7227 * this access. The base and limits are checked.
7228 * @param GCPtrMem The address of the guest memory.
7229 * @param u8Value The value to store.
7230 */
7231static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7232{
7233 /* The lazy approach for now... */
7234 uint8_t *pu8Dst;
7235 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7236 if (rc == VINF_SUCCESS)
7237 {
7238 *pu8Dst = u8Value;
7239 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7240 }
7241 return rc;
7242}
7243
7244
7245/**
7246 * Stores a data word.
7247 *
7248 * @returns Strict VBox status code.
7249 * @param pIemCpu The IEM per CPU data.
7250 * @param iSegReg The index of the segment register to use for
7251 * this access. The base and limits are checked.
7252 * @param GCPtrMem The address of the guest memory.
7253 * @param u16Value The value to store.
7254 */
7255static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7256{
7257 /* The lazy approach for now... */
7258 uint16_t *pu16Dst;
7259 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7260 if (rc == VINF_SUCCESS)
7261 {
7262 *pu16Dst = u16Value;
7263 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7264 }
7265 return rc;
7266}
7267
7268
7269/**
7270 * Stores a data dword.
7271 *
7272 * @returns Strict VBox status code.
7273 * @param pIemCpu The IEM per CPU data.
7274 * @param iSegReg The index of the segment register to use for
7275 * this access. The base and limits are checked.
7276 * @param GCPtrMem The address of the guest memory.
7277 * @param u32Value The value to store.
7278 */
7279static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7280{
7281 /* The lazy approach for now... */
7282 uint32_t *pu32Dst;
7283 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7284 if (rc == VINF_SUCCESS)
7285 {
7286 *pu32Dst = u32Value;
7287 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7288 }
7289 return rc;
7290}
7291
7292
7293/**
7294 * Stores a data qword.
7295 *
7296 * @returns Strict VBox status code.
7297 * @param pIemCpu The IEM per CPU data.
7298 * @param iSegReg The index of the segment register to use for
7299 * this access. The base and limits are checked.
7300 * @param GCPtrMem The address of the guest memory.
7301 * @param u64Value The value to store.
7302 */
7303static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7304{
7305 /* The lazy approach for now... */
7306 uint64_t *pu64Dst;
7307 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7308 if (rc == VINF_SUCCESS)
7309 {
7310 *pu64Dst = u64Value;
7311 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7312 }
7313 return rc;
7314}
7315
7316
7317/**
7318 * Stores a data dqword.
7319 *
7320 * @returns Strict VBox status code.
7321 * @param pIemCpu The IEM per CPU data.
7322 * @param iSegReg The index of the segment register to use for
7323 * this access. The base and limits are checked.
7324 * @param GCPtrMem The address of the guest memory.
7325 * @param u128Value The value to store.
7326 */
7327static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7328{
7329 /* The lazy approach for now... */
7330 uint128_t *pu128Dst;
7331 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7332 if (rc == VINF_SUCCESS)
7333 {
7334 *pu128Dst = u128Value;
7335 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7336 }
7337 return rc;
7338}
7339
7340
7341/**
7342 * Stores a data dqword, SSE aligned.
7343 *
7344 * @returns Strict VBox status code.
7345 * @param pIemCpu The IEM per CPU data.
7346 * @param iSegReg The index of the segment register to use for
7347 * this access. The base and limits are checked.
7348 * @param GCPtrMem The address of the guest memory.
7349 * @param u128Value The value to store.
7350 */
7351static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7352{
7353 /* The lazy approach for now... */
7354 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7355 return iemRaiseGeneralProtectionFault0(pIemCpu);
7356
7357 uint128_t *pu128Dst;
7358 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7359 if (rc == VINF_SUCCESS)
7360 {
7361 *pu128Dst = u128Value;
7362 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7363 }
7364 return rc;
7365}
7366
7367
7368/**
7369 * Stores a descriptor register (sgdt, sidt).
7370 *
7371 * @returns Strict VBox status code.
7372 * @param pIemCpu The IEM per CPU data.
7373 * @param cbLimit The limit.
7374 * @param GCPtrBase The base address.
7375 * @param iSegReg The index of the segment register to use for
7376 * this access. The base and limits are checked.
7377 * @param GCPtrMem The address of the guest memory.
7378 * @param enmOpSize The effective operand size.
7379 */
7380static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
7381 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7382{
7383 uint8_t *pu8Src;
7384 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7385 (void **)&pu8Src,
7386 enmOpSize == IEMMODE_64BIT
7387 ? 2 + 8
7388 : enmOpSize == IEMMODE_32BIT
7389 ? 2 + 4
7390 : 2 + 3,
7391 iSegReg,
7392 GCPtrMem,
7393 IEM_ACCESS_DATA_W);
7394 if (rcStrict == VINF_SUCCESS)
7395 {
7396 pu8Src[0] = RT_BYTE1(cbLimit);
7397 pu8Src[1] = RT_BYTE2(cbLimit);
7398 pu8Src[2] = RT_BYTE1(GCPtrBase);
7399 pu8Src[3] = RT_BYTE2(GCPtrBase);
7400 pu8Src[4] = RT_BYTE3(GCPtrBase);
7401 if (enmOpSize == IEMMODE_16BIT)
7402 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7403 else
7404 {
7405 pu8Src[5] = RT_BYTE4(GCPtrBase);
7406 if (enmOpSize == IEMMODE_64BIT)
7407 {
7408 pu8Src[6] = RT_BYTE5(GCPtrBase);
7409 pu8Src[7] = RT_BYTE6(GCPtrBase);
7410 pu8Src[8] = RT_BYTE7(GCPtrBase);
7411 pu8Src[9] = RT_BYTE8(GCPtrBase);
7412 }
7413 }
7414 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7415 }
7416 return rcStrict;
7417}
7418
7419
7420/**
7421 * Pushes a word onto the stack.
7422 *
7423 * @returns Strict VBox status code.
7424 * @param pIemCpu The IEM per CPU data.
7425 * @param u16Value The value to push.
7426 */
7427static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7428{
7429 /* Decrement the stack pointer. */
7430 uint64_t uNewRsp;
7431 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7432 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7433
7434 /* Write the word the lazy way. */
7435 uint16_t *pu16Dst;
7436 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7437 if (rc == VINF_SUCCESS)
7438 {
7439 *pu16Dst = u16Value;
7440 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7441 }
7442
7443 /* Commit the new RSP value unless an access handler made trouble. */
7444 if (rc == VINF_SUCCESS)
7445 pCtx->rsp = uNewRsp;
7446
7447 return rc;
7448}
7449
7450
7451/**
7452 * Pushes a dword onto the stack.
7453 *
7454 * @returns Strict VBox status code.
7455 * @param pIemCpu The IEM per CPU data.
7456 * @param u32Value The value to push.
7457 */
7458static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7459{
7460 /* Decrement the stack pointer. */
7461 uint64_t uNewRsp;
7462 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7463 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7464
7465 /* Write the dword the lazy way. */
7466 uint32_t *pu32Dst;
7467 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7468 if (rc == VINF_SUCCESS)
7469 {
7470 *pu32Dst = u32Value;
7471 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7472 }
7473
7474 /* Commit the new RSP value unless an access handler made trouble. */
7475 if (rc == VINF_SUCCESS)
7476 pCtx->rsp = uNewRsp;
7477
7478 return rc;
7479}
7480
7481
7482/**
7483 * Pushes a dword segment register value onto the stack.
7484 *
7485 * @returns Strict VBox status code.
7486 * @param pIemCpu The IEM per CPU data.
7487 * @param u32Value The value to push.
7488 */
7489static VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7490{
7491 /* Decrement the stack pointer. */
7492 uint64_t uNewRsp;
7493 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7494 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7495
7496 VBOXSTRICTRC rc;
7497 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7498 {
7499 /* The recompiler writes a full dword. */
7500 uint32_t *pu32Dst;
7501 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7502 if (rc == VINF_SUCCESS)
7503 {
7504 *pu32Dst = u32Value;
7505 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7506 }
7507 }
7508 else
7509 {
7510 /* The Intel docs talk about zero extending the selector register
7511 value. My actual Intel CPU here might be zero extending the value
7512 but it still only writes the lower word... */
7513 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7514 * happens when crossing a page boundary, is the high word
7515 * checked for write accessibility or not? Probably it is. What about
7516 * segment limits? */
7517 uint16_t *pu16Dst;
7518 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7519 if (rc == VINF_SUCCESS)
7520 {
7521 *pu16Dst = (uint16_t)u32Value;
7522 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7523 }
7524 }
7525
7526 /* Commit the new RSP value unless an access handler made trouble. */
7527 if (rc == VINF_SUCCESS)
7528 pCtx->rsp = uNewRsp;
7529
7530 return rc;
7531}
7532
7533
7534/**
7535 * Pushes a qword onto the stack.
7536 *
7537 * @returns Strict VBox status code.
7538 * @param pIemCpu The IEM per CPU data.
7539 * @param u64Value The value to push.
7540 */
7541static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7542{
7543 /* Decrement the stack pointer. */
7544 uint64_t uNewRsp;
7545 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7546 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7547
7548 /* Write the qword the lazy way. */
7549 uint64_t *pu64Dst;
7550 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7551 if (rc == VINF_SUCCESS)
7552 {
7553 *pu64Dst = u64Value;
7554 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7555 }
7556
7557 /* Commit the new RSP value unless an access handler made trouble. */
7558 if (rc == VINF_SUCCESS)
7559 pCtx->rsp = uNewRsp;
7560
7561 return rc;
7562}
7563
7564
7565/**
7566 * Pops a word from the stack.
7567 *
7568 * @returns Strict VBox status code.
7569 * @param pIemCpu The IEM per CPU data.
7570 * @param pu16Value Where to store the popped value.
7571 */
7572static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7573{
7574 /* Increment the stack pointer. */
7575 uint64_t uNewRsp;
7576 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7577 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7578
7579 /* Read the word the lazy way. */
7580 uint16_t const *pu16Src;
7581 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7582 if (rc == VINF_SUCCESS)
7583 {
7584 *pu16Value = *pu16Src;
7585 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7586
7587 /* Commit the new RSP value. */
7588 if (rc == VINF_SUCCESS)
7589 pCtx->rsp = uNewRsp;
7590 }
7591
7592 return rc;
7593}
7594
7595
7596/**
7597 * Pops a dword from the stack.
7598 *
7599 * @returns Strict VBox status code.
7600 * @param pIemCpu The IEM per CPU data.
7601 * @param pu32Value Where to store the popped value.
7602 */
7603static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7604{
7605 /* Increment the stack pointer. */
7606 uint64_t uNewRsp;
7607 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7608 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7609
7610 /* Read the dword the lazy way. */
7611 uint32_t const *pu32Src;
7612 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7613 if (rc == VINF_SUCCESS)
7614 {
7615 *pu32Value = *pu32Src;
7616 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7617
7618 /* Commit the new RSP value. */
7619 if (rc == VINF_SUCCESS)
7620 pCtx->rsp = uNewRsp;
7621 }
7622
7623 return rc;
7624}
7625
7626
7627/**
7628 * Pops a qword from the stack.
7629 *
7630 * @returns Strict VBox status code.
7631 * @param pIemCpu The IEM per CPU data.
7632 * @param pu64Value Where to store the popped value.
7633 */
7634static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7635{
7636 /* Increment the stack pointer. */
7637 uint64_t uNewRsp;
7638 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7639 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7640
7641 /* Read the qword the lazy way. */
7642 uint64_t const *pu64Src;
7643 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7644 if (rc == VINF_SUCCESS)
7645 {
7646 *pu64Value = *pu64Src;
7647 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7648
7649 /* Commit the new RSP value. */
7650 if (rc == VINF_SUCCESS)
7651 pCtx->rsp = uNewRsp;
7652 }
7653
7654 return rc;
7655}
7656
7657
7658/**
7659 * Pushes a word onto the stack, using a temporary stack pointer.
7660 *
7661 * @returns Strict VBox status code.
7662 * @param pIemCpu The IEM per CPU data.
7663 * @param u16Value The value to push.
7664 * @param pTmpRsp Pointer to the temporary stack pointer.
7665 */
7666static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7667{
7668 /* Decrement the stack pointer. */
7669 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7670 RTUINT64U NewRsp = *pTmpRsp;
7671 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7672
7673 /* Write the word the lazy way. */
7674 uint16_t *pu16Dst;
7675 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7676 if (rc == VINF_SUCCESS)
7677 {
7678 *pu16Dst = u16Value;
7679 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7680 }
7681
7682 /* Commit the new RSP value unless an access handler made trouble. */
7683 if (rc == VINF_SUCCESS)
7684 *pTmpRsp = NewRsp;
7685
7686 return rc;
7687}
7688
7689
7690/**
7691 * Pushes a dword onto the stack, using a temporary stack pointer.
7692 *
7693 * @returns Strict VBox status code.
7694 * @param pIemCpu The IEM per CPU data.
7695 * @param u32Value The value to push.
7696 * @param pTmpRsp Pointer to the temporary stack pointer.
7697 */
7698static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7699{
7700 /* Decrement the stack pointer. */
7701 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7702 RTUINT64U NewRsp = *pTmpRsp;
7703 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7704
7705 /* Write the dword the lazy way. */
7706 uint32_t *pu32Dst;
7707 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7708 if (rc == VINF_SUCCESS)
7709 {
7710 *pu32Dst = u32Value;
7711 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7712 }
7713
7714 /* Commit the new RSP value unless an access handler made trouble. */
7715 if (rc == VINF_SUCCESS)
7716 *pTmpRsp = NewRsp;
7717
7718 return rc;
7719}
7720
7721
7722/**
7723 * Pushes a qword onto the stack, using a temporary stack pointer.
7724 *
7725 * @returns Strict VBox status code.
7726 * @param pIemCpu The IEM per CPU data.
7727 * @param u64Value The value to push.
7728 * @param pTmpRsp Pointer to the temporary stack pointer.
7729 */
7730static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7731{
7732 /* Decrement the stack pointer. */
7733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7734 RTUINT64U NewRsp = *pTmpRsp;
7735 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7736
7737 /* Write the qword the lazy way. */
7738 uint64_t *pu64Dst;
7739 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7740 if (rc == VINF_SUCCESS)
7741 {
7742 *pu64Dst = u64Value;
7743 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7744 }
7745
7746 /* Commit the new RSP value unless an access handler made trouble. */
7747 if (rc == VINF_SUCCESS)
7748 *pTmpRsp = NewRsp;
7749
7750 return rc;
7751}
7752
7753
7754/**
7755 * Pops a word from the stack, using a temporary stack pointer.
7756 *
7757 * @returns Strict VBox status code.
7758 * @param pIemCpu The IEM per CPU data.
7759 * @param pu16Value Where to store the popped value.
7760 * @param pTmpRsp Pointer to the temporary stack pointer.
7761 */
7762static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7763{
7764 /* Increment the stack pointer. */
7765 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7766 RTUINT64U NewRsp = *pTmpRsp;
7767 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7768
7769 /* Read the word the lazy way. */
7770 uint16_t const *pu16Src;
7771 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7772 if (rc == VINF_SUCCESS)
7773 {
7774 *pu16Value = *pu16Src;
7775 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7776
7777 /* Commit the new RSP value. */
7778 if (rc == VINF_SUCCESS)
7779 *pTmpRsp = NewRsp;
7780 }
7781
7782 return rc;
7783}
7784
7785
7786/**
7787 * Pops a dword from the stack, using a temporary stack pointer.
7788 *
7789 * @returns Strict VBox status code.
7790 * @param pIemCpu The IEM per CPU data.
7791 * @param pu32Value Where to store the popped value.
7792 * @param pTmpRsp Pointer to the temporary stack pointer.
7793 */
7794static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7795{
7796 /* Increment the stack pointer. */
7797 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7798 RTUINT64U NewRsp = *pTmpRsp;
7799 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7800
7801 /* Read the dword the lazy way. */
7802 uint32_t const *pu32Src;
7803 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7804 if (rc == VINF_SUCCESS)
7805 {
7806 *pu32Value = *pu32Src;
7807 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7808
7809 /* Commit the new RSP value. */
7810 if (rc == VINF_SUCCESS)
7811 *pTmpRsp = NewRsp;
7812 }
7813
7814 return rc;
7815}
7816
7817
7818/**
7819 * Pops a qword from the stack, using a temporary stack pointer.
7820 *
7821 * @returns Strict VBox status code.
7822 * @param pIemCpu The IEM per CPU data.
7823 * @param pu64Value Where to store the popped value.
7824 * @param pTmpRsp Pointer to the temporary stack pointer.
7825 */
7826static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7827{
7828 /* Increment the stack pointer. */
7829 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7830 RTUINT64U NewRsp = *pTmpRsp;
7831 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7832
7833 /* Read the qword the lazy way. */
7834 uint64_t const *pu64Src;
7835 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7836 if (rcStrict == VINF_SUCCESS)
7837 {
7838 *pu64Value = *pu64Src;
7839 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7840
7841 /* Commit the new RSP value. */
7842 if (rcStrict == VINF_SUCCESS)
7843 *pTmpRsp = NewRsp;
7844 }
7845
7846 return rcStrict;
7847}
7848
7849
7850/**
7851 * Begin a special stack push (used by interrupts, exceptions and such).
7852 *
7853 * This will raise \#SS or \#PF if appropriate.
7854 *
7855 * @returns Strict VBox status code.
7856 * @param pIemCpu The IEM per CPU data.
7857 * @param cbMem The number of bytes to push onto the stack.
7858 * @param ppvMem Where to return the pointer to the stack memory.
7859 * As with the other memory functions this could be
7860 * direct access or bounce buffered access, so
7861 * don't commit any register changes until the commit call
7862 * succeeds.
7863 * @param puNewRsp Where to return the new RSP value. This must be
7864 * passed unchanged to
7865 * iemMemStackPushCommitSpecial().
7866 */
7867static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
7868{
7869 Assert(cbMem < UINT8_MAX);
7870 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7871 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7872 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7873}
7874
7875
7876/**
7877 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7878 *
7879 * This will update the rSP.
7880 *
7881 * @returns Strict VBox status code.
7882 * @param pIemCpu The IEM per CPU data.
7883 * @param pvMem The pointer returned by
7884 * iemMemStackPushBeginSpecial().
7885 * @param uNewRsp The new RSP value returned by
7886 * iemMemStackPushBeginSpecial().
7887 */
7888static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
7889{
7890 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
7891 if (rcStrict == VINF_SUCCESS)
7892 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7893 return rcStrict;
7894}
7895
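/* Illustrative only (not part of the original code): the two phase pattern that exception
 * dispatching code uses with the helpers above - begin the special push, fill in the stack
 * frame, then commit, which is what finally updates RSP. cbFrame and the frame contents are
 * hypothetical.
 */
#if 0
    void        *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict2 = iemMemStackPushBeginSpecial(pIemCpu, cbFrame, &pvFrame, &uNewRsp);
    if (rcStrict2 != VINF_SUCCESS)
        return rcStrict2;
    /* ... write EFLAGS, CS, EIP and possibly an error code into pvFrame ... */
    rcStrict2 = iemMemStackPushCommitSpecial(pIemCpu, pvFrame, uNewRsp);
#endif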
7896
7897/**
7898 * Begin a special stack pop (used by iret, retf and such).
7899 *
7900 * This will raise \#SS or \#PF if appropriate.
7901 *
7902 * @returns Strict VBox status code.
7903 * @param pIemCpu The IEM per CPU data.
7904 * @param cbMem The number of bytes to pop off the stack.
7905 * @param ppvMem Where to return the pointer to the stack memory.
7906 * @param puNewRsp Where to return the new RSP value. This must be
7907 * passed unchanged to
7908 * iemMemStackPopCommitSpecial() or applied
7909 * manually if iemMemStackPopDoneSpecial() is used.
7910 */
7911static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7912{
7913 Assert(cbMem < UINT8_MAX);
7914 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7915 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
7916 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7917}
7918
7919
7920/**
7921 * Continue a special stack pop (used by iret and retf).
7922 *
7923 * This will raise \#SS or \#PF if appropriate.
7924 *
7925 * @returns Strict VBox status code.
7926 * @param pIemCpu The IEM per CPU data.
7927 * @param cbMem The number of bytes to pop off the stack.
7928 * @param ppvMem Where to return the pointer to the stack memory.
7929 * @param puNewRsp Where to return the new RSP value. This must be
7930 * passed unchanged to
7931 * iemMemStackPopCommitSpecial() or applied
7932 * manually if iemMemStackPopDoneSpecial() is used.
7933 */
7934static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
7935{
7936 Assert(cbMem < UINT8_MAX);
7937 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7938 RTUINT64U NewRsp;
7939 NewRsp.u = *puNewRsp;
7940 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7941 *puNewRsp = NewRsp.u;
7942 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7943}
7944
7945
7946/**
7947 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
7948 *
7949 * This will update the rSP.
7950 *
7951 * @returns Strict VBox status code.
7952 * @param pIemCpu The IEM per CPU data.
7953 * @param pvMem The pointer returned by
7954 * iemMemStackPopBeginSpecial().
7955 * @param uNewRsp The new RSP value returned by
7956 * iemMemStackPopBeginSpecial().
7957 */
7958static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
7959{
7960 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7961 if (rcStrict == VINF_SUCCESS)
7962 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7963 return rcStrict;
7964}
7965
7966
7967/**
7968 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7969 * iemMemStackPopContinueSpecial).
7970 *
7971 * The caller will manually commit the rSP.
7972 *
7973 * @returns Strict VBox status code.
7974 * @param pIemCpu The IEM per CPU data.
7975 * @param pvMem The pointer returned by
7976 * iemMemStackPopBeginSpecial() or
7977 * iemMemStackPopContinueSpecial().
7978 */
7979static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
7980{
7981 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7982}
7983
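/* Illustrative only (not part of the original code): a 16-bit IRET style sequence using the
 * special pop helpers above - begin the pop, read the frame, then commit to update RSP (or
 * call iemMemStackPopDoneSpecial and update RSP manually). The locals are hypothetical.
 */
#if 0
    void const  *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict2 = iemMemStackPopBeginSpecial(pIemCpu, 2 + 2 + 2, &pvFrame, &uNewRsp);
    if (rcStrict2 != VINF_SUCCESS)
        return rcStrict2;
    uint16_t const uNewIp    = ((uint16_t const *)pvFrame)[0];
    uint16_t const uNewCs    = ((uint16_t const *)pvFrame)[1];
    uint16_t const uNewFlags = ((uint16_t const *)pvFrame)[2];
    rcStrict2 = iemMemStackPopCommitSpecial(pIemCpu, pvFrame, uNewRsp);
#endif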
7984
7985/**
7986 * Fetches a system table byte.
7987 *
7988 * @returns Strict VBox status code.
7989 * @param pIemCpu The IEM per CPU data.
7990 * @param pbDst Where to return the byte.
7991 * @param iSegReg The index of the segment register to use for
7992 * this access. The base and limits are checked.
7993 * @param GCPtrMem The address of the guest memory.
7994 */
7995static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7996{
7997 /* The lazy approach for now... */
7998 uint8_t const *pbSrc;
7999 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8000 if (rc == VINF_SUCCESS)
8001 {
8002 *pbDst = *pbSrc;
8003 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8004 }
8005 return rc;
8006}
8007
8008
8009/**
8010 * Fetches a system table word.
8011 *
8012 * @returns Strict VBox status code.
8013 * @param pIemCpu The IEM per CPU data.
8014 * @param pu16Dst Where to return the word.
8015 * @param iSegReg The index of the segment register to use for
8016 * this access. The base and limits are checked.
8017 * @param GCPtrMem The address of the guest memory.
8018 */
8019static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8020{
8021 /* The lazy approach for now... */
8022 uint16_t const *pu16Src;
8023 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8024 if (rc == VINF_SUCCESS)
8025 {
8026 *pu16Dst = *pu16Src;
8027 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8028 }
8029 return rc;
8030}
8031
8032
8033/**
8034 * Fetches a system table dword.
8035 *
8036 * @returns Strict VBox status code.
8037 * @param pIemCpu The IEM per CPU data.
8038 * @param pu32Dst Where to return the dword.
8039 * @param iSegReg The index of the segment register to use for
8040 * this access. The base and limits are checked.
8041 * @param GCPtrMem The address of the guest memory.
8042 */
8043static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8044{
8045 /* The lazy approach for now... */
8046 uint32_t const *pu32Src;
8047 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8048 if (rc == VINF_SUCCESS)
8049 {
8050 *pu32Dst = *pu32Src;
8051 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8052 }
8053 return rc;
8054}
8055
8056
8057/**
8058 * Fetches a system table qword.
8059 *
8060 * @returns Strict VBox status code.
8061 * @param pIemCpu The IEM per CPU data.
8062 * @param pu64Dst Where to return the qword.
8063 * @param iSegReg The index of the segment register to use for
8064 * this access. The base and limits are checked.
8065 * @param GCPtrMem The address of the guest memory.
8066 */
8067static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8068{
8069 /* The lazy approach for now... */
8070 uint64_t const *pu64Src;
8071 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8072 if (rc == VINF_SUCCESS)
8073 {
8074 *pu64Dst = *pu64Src;
8075 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8076 }
8077 return rc;
8078}
8079
8080
8081/**
8082 * Fetches a descriptor table entry with caller specified error code.
8083 *
8084 * @returns Strict VBox status code.
8085 * @param pIemCpu The IEM per CPU.
8086 * @param pDesc Where to return the descriptor table entry.
8087 * @param uSel The selector which table entry to fetch.
8088 * @param uXcpt The exception to raise on table lookup error.
8089 * @param uErrorCode The error code associated with the exception.
8090 */
8091static VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt,
8092 uint16_t uErrorCode)
8093{
8094 AssertPtr(pDesc);
8095 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8096
8097 /** @todo did the 286 require all 8 bytes to be accessible? */
8098 /*
8099 * Get the selector table base and check bounds.
8100 */
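    /* Worked example: selector 0x002b has RPL=3, TI=0 (GDT) and index 5, so its
       descriptor is read from gdtr.pGdt + 5*8 = gdtr.pGdt + 0x28. */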
8101 RTGCPTR GCPtrBase;
8102 if (uSel & X86_SEL_LDT)
8103 {
8104 if ( !pCtx->ldtr.Attr.n.u1Present
8105 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8106 {
8107 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8108 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8109 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8110 uErrorCode, 0);
8111 }
8112
8113 Assert(pCtx->ldtr.Attr.n.u1Present);
8114 GCPtrBase = pCtx->ldtr.u64Base;
8115 }
8116 else
8117 {
8118 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8119 {
8120 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8121 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8122 uErrorCode, 0);
8123 }
8124 GCPtrBase = pCtx->gdtr.pGdt;
8125 }
8126
8127 /*
8128 * Read the legacy descriptor and maybe the long mode extensions if
8129 * required.
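     * (When the long mode extensions are needed they live at (uSel & ~7) + 8 in
     * the table; the (uSel | X86_SEL_RPL_LDT) + 1 expression below computes
     * exactly that, since X86_SEL_RPL_LDT is 7.)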
8130 */
8131 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8132 if (rcStrict == VINF_SUCCESS)
8133 {
8134 if ( !IEM_IS_LONG_MODE(pIemCpu)
8135 || pDesc->Legacy.Gen.u1DescType)
8136 pDesc->Long.au64[1] = 0;
8137 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8138 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8139 else
8140 {
8141 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8142 /** @todo is this the right exception? */
8143 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8144 }
8145 }
8146 return rcStrict;
8147}
8148
8149
8150/**
8151 * Fetches a descriptor table entry.
8152 *
8153 * @returns Strict VBox status code.
8154 * @param pIemCpu The IEM per CPU.
8155 * @param pDesc Where to return the descriptor table entry.
8156 * @param uSel The selector which table entry to fetch.
8157 * @param uXcpt The exception to raise on table lookup error.
8158 */
8159static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8160{
8161 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8162}
8163
8164
8165/**
8166 * Fakes a long mode stack segment descriptor for SS = 0.
8167 *
8168 * @param pDescSs Where to return the fake stack descriptor.
8169 * @param uDpl The DPL we want.
8170 */
8171static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8172{
8173 pDescSs->Long.au64[0] = 0;
8174 pDescSs->Long.au64[1] = 0;
8175 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8176 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8177 pDescSs->Long.Gen.u2Dpl = uDpl;
8178 pDescSs->Long.Gen.u1Present = 1;
8179 pDescSs->Long.Gen.u1Long = 1;
8180}
8181
8182
8183/**
8184 * Marks the selector descriptor as accessed (only non-system descriptors).
8185 *
8186 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8187 * will therefore skip the limit checks.
8188 *
8189 * @returns Strict VBox status code.
8190 * @param pIemCpu The IEM per CPU.
8191 * @param uSel The selector.
8192 */
8193static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8194{
8195 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8196
8197 /*
8198 * Get the selector table base and calculate the entry address.
8199 */
8200 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8201 ? pCtx->ldtr.u64Base
8202 : pCtx->gdtr.pGdt;
8203 GCPtr += uSel & X86_SEL_MASK;
8204
8205 /*
8206 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8207 * ugly stuff to avoid this. This will make sure it's an atomic access
8208 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8209 */
8210 VBOXSTRICTRC rcStrict;
8211 uint32_t volatile *pu32;
8212 if ((GCPtr & 3) == 0)
8213 {
8214 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8215 GCPtr += 2 + 2;
8216 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8217 if (rcStrict != VINF_SUCCESS)
8218 return rcStrict;
8219        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8220 }
8221 else
8222 {
8223 /* The misaligned GDT/LDT case, map the whole thing. */
8224 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8225 if (rcStrict != VINF_SUCCESS)
8226 return rcStrict;
8227 switch ((uintptr_t)pu32 & 3)
8228 {
8229 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8230 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8231 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8232 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8233 }
8234 }
8235
8236 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8237}
8238
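/*
 * A minimal usage sketch for the two helpers above (not lifted verbatim from
 * any caller; the real segment loading code in IEMAllCImpl.cpp.h performs a
 * lot more checking before and after these calls, and also keeps the accessed
 * bit of the local descriptor copy in sync):
 *
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *          Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *      }
 */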
8239/** @} */
8240
8241
8242/*
8243 * Include the C/C++ implementation of instruction.
8244 */
8245#include "IEMAllCImpl.cpp.h"
8246
8247
8248
8249/** @name "Microcode" macros.
8250 *
8251 * The idea is that we should be able to use the same code to interpret
8252 * instructions as well as recompiler instructions. Thus this obfuscation.
8253 *
8254 * @{
8255 */
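/*
 * Rough sketch of how these macros compose into an instruction body (the real
 * decoders live in IEMAllInstructions.cpp.h; this is a simplified,
 * register-form-only illustration and not a verbatim copy of any of them, with
 * bRm being the ModR/M byte the decoder has already read):
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *      IEM_MC_ARG(uint16_t,   u16Src,   1);
 *      IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *      IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
 *      IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */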
8256#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8257#define IEM_MC_END() }
8258#define IEM_MC_PAUSE() do {} while (0)
8259#define IEM_MC_CONTINUE() do {} while (0)
8260
8261/** Internal macro. */
8262#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8263 do \
8264 { \
8265 VBOXSTRICTRC rcStrict2 = a_Expr; \
8266 if (rcStrict2 != VINF_SUCCESS) \
8267 return rcStrict2; \
8268 } while (0)
8269
8270#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8271#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8272#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8273#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8274#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8275#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8276#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8277
8278#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8279#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8280 do { \
8281 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8282 return iemRaiseDeviceNotAvailable(pIemCpu); \
8283 } while (0)
8284#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8285 do { \
8286 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
8287 return iemRaiseMathFault(pIemCpu); \
8288 } while (0)
8289#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8290 do { \
8291 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8292 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFSXR) \
8293 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2) ) \
8294 return iemRaiseUndefinedOpcode(pIemCpu); \
8295 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8296 return iemRaiseDeviceNotAvailable(pIemCpu); \
8297 } while (0)
8298#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8299 do { \
8300 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8301 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX) ) \
8302 return iemRaiseUndefinedOpcode(pIemCpu); \
8303 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8304 return iemRaiseDeviceNotAvailable(pIemCpu); \
8305 } while (0)
8306#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8307 do { \
8308 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8309 || ( !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE) \
8310 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_AXMMX) ) ) \
8311 return iemRaiseUndefinedOpcode(pIemCpu); \
8312 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8313 return iemRaiseDeviceNotAvailable(pIemCpu); \
8314 } while (0)
8315#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8316 do { \
8317 if (pIemCpu->uCpl != 0) \
8318 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8319 } while (0)
8320
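/*
 * The IEM_MC_MAYBE_RAISE_* / IEM_MC_RAISE_* checks above are emitted at the
 * top of an instruction body, before any guest state is modified.  A sketch of
 * a typical FPU instruction prologue (simplified, not from any specific
 * decoder):
 *
 *      IEM_MC_BEGIN(3, 1);
 *      ... argument and local declarations ...
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      ... fetch operands, call the worker, update the FPU state ...
 *      IEM_MC_END();
 */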
8321
8322#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8323#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8324#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8325#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8326#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8327#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8328#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8329 uint32_t a_Name; \
8330 uint32_t *a_pName = &a_Name
8331#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8332 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8333
8334#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8335#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8336
8337#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8338#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8339#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8340#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8341#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8342#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8343#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8344#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8345#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8346#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8347#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8348#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8349#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8350#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8351#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8352#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8353#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8354#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8355#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8356#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8357#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8358#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8359#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8360#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8361#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8362#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8363#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8364#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8365#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8366/** @note Not for IOPL or IF testing or modification. */
8367#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8368#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8369#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
8370#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
8371
8372#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8373#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8374#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8375#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8376#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8377#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8378#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8379#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8380#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8381#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8382#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8383 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8384
8385#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8386#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8387/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8388 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8389#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8390#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8391/** @note Not for IOPL or IF testing or modification. */
8392#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8393
8394#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8395#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8396#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8397 do { \
8398 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8399 *pu32Reg += (a_u32Value); \
8400        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
8401 } while (0)
8402#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8403
8404#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8405#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8406#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8407 do { \
8408 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8409 *pu32Reg -= (a_u32Value); \
8410        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
8411 } while (0)
8412#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8413
8414#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8415#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8416#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8417#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8418#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8419#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8420#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8421
8422#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8423#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8424#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8425#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8426
8427#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8428#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8429#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8430
8431#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8432#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8433
8434#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8435#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8436#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8437
8438#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8439#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8440#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8441
8442#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8443
8444#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8445
8446#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8447#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8448#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8449 do { \
8450 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8451 *pu32Reg &= (a_u32Value); \
8452        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
8453 } while (0)
8454#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8455
8456#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8457#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8458#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8459 do { \
8460 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8461 *pu32Reg |= (a_u32Value); \
8462        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
8463 } while (0)
8464#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8465
8466
8467/** @note Not for IOPL or IF modification. */
8468#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8469/** @note Not for IOPL or IF modification. */
8470#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8471/** @note Not for IOPL or IF modification. */
8472#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8473
8474#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8475
8476
8477#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8478 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx; } while (0)
8479#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8480 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].au32[0]; } while (0)
8481#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8482 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8483#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8484 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8485#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8486 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
8487#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8488 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
8489#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8490 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
8491
8492#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8493 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm; } while (0)
8494#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8495 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0]; } while (0)
8496#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8497 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au32[0]; } while (0)
8498#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8499 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8500#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8501 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8502 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
8503 } while (0)
8504#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8505 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8506 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
8507 } while (0)
8508#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8509 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
8510#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8511 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
8512#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8513 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0])
8514
8515#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8516 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8517#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8518 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8519#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8520 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8521
8522#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8523 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8524#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8525 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8526#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8527 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8528
8529#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8530 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8531#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8532 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8533#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8534 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8535
8536#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8537 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8538
8539#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8540 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8541#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8542 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8543#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8544 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8545
8546#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8547 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8548#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8549 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8550#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8551 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8552
8553#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8554 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8555#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8556 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8557
8558
8559
8560#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8561 do { \
8562 uint8_t u8Tmp; \
8563 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8564 (a_u16Dst) = u8Tmp; \
8565 } while (0)
8566#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8567 do { \
8568 uint8_t u8Tmp; \
8569 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8570 (a_u32Dst) = u8Tmp; \
8571 } while (0)
8572#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8573 do { \
8574 uint8_t u8Tmp; \
8575 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8576 (a_u64Dst) = u8Tmp; \
8577 } while (0)
8578#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8579 do { \
8580 uint16_t u16Tmp; \
8581 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8582 (a_u32Dst) = u16Tmp; \
8583 } while (0)
8584#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8585 do { \
8586 uint16_t u16Tmp; \
8587 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8588 (a_u64Dst) = u16Tmp; \
8589 } while (0)
8590#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8591 do { \
8592 uint32_t u32Tmp; \
8593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8594 (a_u64Dst) = u32Tmp; \
8595 } while (0)
8596
8597#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8598 do { \
8599 uint8_t u8Tmp; \
8600 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8601 (a_u16Dst) = (int8_t)u8Tmp; \
8602 } while (0)
8603#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8604 do { \
8605 uint8_t u8Tmp; \
8606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8607 (a_u32Dst) = (int8_t)u8Tmp; \
8608 } while (0)
8609#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8610 do { \
8611 uint8_t u8Tmp; \
8612 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8613 (a_u64Dst) = (int8_t)u8Tmp; \
8614 } while (0)
8615#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8616 do { \
8617 uint16_t u16Tmp; \
8618 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8619 (a_u32Dst) = (int16_t)u16Tmp; \
8620 } while (0)
8621#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8622 do { \
8623 uint16_t u16Tmp; \
8624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8625 (a_u64Dst) = (int16_t)u16Tmp; \
8626 } while (0)
8627#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8628 do { \
8629 uint32_t u32Tmp; \
8630 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8631 (a_u64Dst) = (int32_t)u32Tmp; \
8632 } while (0)
8633
8634#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8635 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8636#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8637 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8638#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8639 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8640#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8641 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8642
8643#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8644 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8645#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8646 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8647#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8648 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8649#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8650 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8651
8652#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8653#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8654#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8655#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8656#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8657#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8658#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8659 do { \
8660 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8661 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8662 } while (0)
8663
8664#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8665 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8666#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8667 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8668
8669
8670#define IEM_MC_PUSH_U16(a_u16Value) \
8671 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8672#define IEM_MC_PUSH_U32(a_u32Value) \
8673 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8674#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8675 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8676#define IEM_MC_PUSH_U64(a_u64Value) \
8677 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8678
8679#define IEM_MC_POP_U16(a_pu16Value) \
8680 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8681#define IEM_MC_POP_U32(a_pu32Value) \
8682 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8683#define IEM_MC_POP_U64(a_pu64Value) \
8684 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8685
8686/** Maps guest memory for direct or bounce buffered access.
8687 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8688 * @remarks May return.
8689 */
8690#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8691 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8692
8693/** Maps guest memory for direct or bounce buffered access, with the size given explicitly (@a a_cbMem).
8694 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8695 * @remarks May return.
8696 */
8697#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8698 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8699
8700/** Commits the memory and unmaps the guest memory.
8701 * @remarks May return.
8702 */
8703#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8704 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8705
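/*
 * IEM_MC_MEM_MAP and IEM_MC_MEM_COMMIT_AND_UNMAP are used as a pair around
 * read-modify-write memory operands.  A sketch (simplified; assumes the
 * decoder has already calculated GCPtrEffDst, declared pu16Dst and u16Src as
 * IEM_MC_ARGs and pEFlags/EFlags via IEM_MC_ARG_LOCAL_EFLAGS):
 *
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 */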
8706/** Commits the memory and unmaps the guest memory unless the FPU status word
8707 * (@a a_u16FSW) and the FPU control word indicate a pending exception
8708 * that would cause FLD not to store.
8709 *
8710 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8711 * store, while \#P will not.
8712 *
8713 * @remarks May in theory return - for now.
8714 */
8715#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8716 do { \
8717 if ( !(a_u16FSW & X86_FSW_ES) \
8718 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8719 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
8720 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8721 } while (0)
8722
8723/** Calculate efficient address from R/M. */
8724#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8725 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8726
8727#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8728#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8729#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8730#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8731#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8732#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8733#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8734
8735/**
8736 * Defers the rest of the instruction emulation to a C implementation routine
8737 * and returns, only taking the standard parameters.
8738 *
8739 * @param a_pfnCImpl The pointer to the C routine.
8740 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8741 */
8742#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8743
8744/**
8745 * Defers the rest of instruction emulation to a C implementation routine and
8746 * returns, taking one argument in addition to the standard ones.
8747 *
8748 * @param a_pfnCImpl The pointer to the C routine.
8749 * @param a0 The argument.
8750 */
8751#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8752
8753/**
8754 * Defers the rest of the instruction emulation to a C implementation routine
8755 * and returns, taking two arguments in addition to the standard ones.
8756 *
8757 * @param a_pfnCImpl The pointer to the C routine.
8758 * @param a0 The first extra argument.
8759 * @param a1 The second extra argument.
8760 */
8761#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8762
8763/**
8764 * Defers the rest of the instruction emulation to a C implementation routine
8765 * and returns, taking three arguments in addition to the standard ones.
8766 *
8767 * @param a_pfnCImpl The pointer to the C routine.
8768 * @param a0 The first extra argument.
8769 * @param a1 The second extra argument.
8770 * @param a2 The third extra argument.
8771 */
8772#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8773
8774/**
8775 * Defers the rest of the instruction emulation to a C implementation routine
8776 * and returns, taking four arguments in addition to the standard ones.
8777 *
8778 * @param a_pfnCImpl The pointer to the C routine.
8779 * @param a0 The first extra argument.
8780 * @param a1 The second extra argument.
8781 * @param a2 The third extra argument.
8782 * @param a3 The fourth extra argument.
8783 */
8784#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8785
8786/**
8787 * Defers the rest of the instruction emulation to a C implementation routine
8788 * and returns, taking five arguments in addition to the standard ones.
8789 *
8790 * @param a_pfnCImpl The pointer to the C routine.
8791 * @param a0 The first extra argument.
8792 * @param a1 The second extra argument.
8793 * @param a2 The third extra argument.
8794 * @param a3 The fourth extra argument.
8795 * @param a4 The fifth extra argument.
8796 */
8797#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8798
8799/**
8800 * Defers the entire instruction emulation to a C implementation routine and
8801 * returns, only taking the standard parameters.
8802 *
8803 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
8804 *
8805 * @param a_pfnCImpl The pointer to the C routine.
8806 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8807 */
8808#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8809
8810/**
8811 * Defers the entire instruction emulation to a C implementation routine and
8812 * returns, taking one argument in addition to the standard ones.
8813 *
8814 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
8815 *
8816 * @param a_pfnCImpl The pointer to the C routine.
8817 * @param a0 The argument.
8818 */
8819#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8820
8821/**
8822 * Defers the entire instruction emulation to a C implementation routine and
8823 * returns, taking two arguments in addition to the standard ones.
8824 *
8825 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
8826 *
8827 * @param a_pfnCImpl The pointer to the C routine.
8828 * @param a0 The first extra argument.
8829 * @param a1 The second extra argument.
8830 */
8831#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8832
8833/**
8834 * Defers the entire instruction emulation to a C implementation routine and
8835 * returns, taking three arguments in addition to the standard ones.
8836 *
8837 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
8838 *
8839 * @param a_pfnCImpl The pointer to the C routine.
8840 * @param a0 The first extra argument.
8841 * @param a1 The second extra argument.
8842 * @param a2 The third extra argument.
8843 */
8844#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8845
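/*
 * Quick contrast between the two CIMPL families above (sketches, not copied
 * from any specific decoder; iemCImpl_somework is a made-up name and
 * GCPtrEffSrc a decoder local):
 *
 *  IEM_MC_CALL_CIMPL_N sits inside an IEM_MC_BEGIN/IEM_MC_END block, after
 *  the operands have been decoded:
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG_CONST(uint8_t, iEffSeg,  pIemCpu->iEffSeg, 0);
 *      IEM_MC_ARG_CONST(RTGCPTR, GCPtrEff, GCPtrEffSrc,      1);
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_somework, iEffSeg, GCPtrEff);
 *      IEM_MC_END();
 *
 *  IEM_MC_DEFER_TO_CIMPL_N replaces the whole body when there is nothing left
 *  to decode (HLT is handled this way, for instance):
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 */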
8846/**
8847 * Calls a FPU assembly implementation taking one visible argument.
8848 *
8849 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8850 * @param a0 The first extra argument.
8851 */
8852#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8853 do { \
8854 iemFpuPrepareUsage(pIemCpu); \
8855 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
8856 } while (0)
8857
8858/**
8859 * Calls a FPU assembly implementation taking two visible arguments.
8860 *
8861 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8862 * @param a0 The first extra argument.
8863 * @param a1 The second extra argument.
8864 */
8865#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
8866 do { \
8867 iemFpuPrepareUsage(pIemCpu); \
8868 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
8869 } while (0)
8870
8871/**
8872 * Calls a FPU assembly implementation taking three visible arguments.
8873 *
8874 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8875 * @param a0 The first extra argument.
8876 * @param a1 The second extra argument.
8877 * @param a2 The third extra argument.
8878 */
8879#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
8880 do { \
8881 iemFpuPrepareUsage(pIemCpu); \
8882 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
8883 } while (0)
8884
8885#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
8886 do { \
8887 (a_FpuData).FSW = (a_FSW); \
8888 (a_FpuData).r80Result = *(a_pr80Value); \
8889 } while (0)
8890
8891/** Pushes FPU result onto the stack. */
8892#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
8893 iemFpuPushResult(pIemCpu, &a_FpuData)
8894/** Pushes FPU result onto the stack and sets the FPUDP. */
8895#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
8896 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
8897
8898/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
8899#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
8900 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
8901
8902/** Stores FPU result in a stack register. */
8903#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
8904 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
8905/** Stores FPU result in a stack register and pops the stack. */
8906#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
8907 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
8908/** Stores FPU result in a stack register and sets the FPUDP. */
8909#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8910 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
8911/** Stores FPU result in a stack register, sets the FPUDP, and pops the
8912 * stack. */
8913#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
8914 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
8915
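/*
 * How the FPU result macros above typically fit together for a register-form
 * arithmetic instruction (a simplified sketch, not copied verbatim from any
 * decoder; iStReg stands for the register index decoded from the ModR/M byte):
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_USED_FPU();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */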
8916/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
8917#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
8918 iemFpuUpdateOpcodeAndIp(pIemCpu)
8919/** Free a stack register (for FFREE and FFREEP). */
8920#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
8921 iemFpuStackFree(pIemCpu, a_iStReg)
8922/** Increment the FPU stack pointer. */
8923#define IEM_MC_FPU_STACK_INC_TOP() \
8924 iemFpuStackIncTop(pIemCpu)
8925/** Decrement the FPU stack pointer. */
8926#define IEM_MC_FPU_STACK_DEC_TOP() \
8927 iemFpuStackDecTop(pIemCpu)
8928
8929/** Updates the FSW, FOP, FPUIP, and FPUCS. */
8930#define IEM_MC_UPDATE_FSW(a_u16FSW) \
8931 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8932/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
8933#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
8934 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
8935/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
8936#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8937 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8938/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
8939#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
8940 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
8941/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
8942 * stack. */
8943#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
8944 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
8945/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
8946#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
8947    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
8948
8949/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
8950#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
8951 iemFpuStackUnderflow(pIemCpu, a_iStDst)
8952/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8953 * stack. */
8954#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
8955 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
8956/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8957 * FPUDS. */
8958#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8959 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8960/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8961 * FPUDS. Pops stack. */
8962#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8963 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8964/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8965 * stack twice. */
8966#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
8967 iemFpuStackUnderflowThenPopPop(pIemCpu)
8968/** Raises a FPU stack underflow exception for an instruction pushing a result
8969 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
8970#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
8971 iemFpuStackPushUnderflow(pIemCpu)
8972/** Raises a FPU stack underflow exception for an instruction pushing a result
8973 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
8974#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
8975 iemFpuStackPushUnderflowTwo(pIemCpu)
8976
8977/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8978 * FPUIP, FPUCS and FOP. */
8979#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
8980 iemFpuStackPushOverflow(pIemCpu)
8981/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8982 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
8983#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
8984 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
8985/** Indicates that we (might) have modified the FPU state. */
8986#define IEM_MC_USED_FPU() \
8987 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
8988
8989/**
8990 * Calls a MMX assembly implementation taking two visible arguments.
8991 *
8992 * @param a_pfnAImpl Pointer to the assembly MMX routine.
8993 * @param a0 The first extra argument.
8994 * @param a1 The second extra argument.
8995 */
8996#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
8997 do { \
8998 iemFpuPrepareUsage(pIemCpu); \
8999 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
9000 } while (0)
9001
9002/**
9003 * Calls a MMX assembly implementation taking three visible arguments.
9004 *
9005 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9006 * @param a0 The first extra argument.
9007 * @param a1 The second extra argument.
9008 * @param a2 The third extra argument.
9009 */
9010#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9011 do { \
9012 iemFpuPrepareUsage(pIemCpu); \
9013 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
9014 } while (0)
9015
9016
9017/**
9018 * Calls a SSE assembly implementation taking two visible arguments.
9019 *
9020 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9021 * @param a0 The first extra argument.
9022 * @param a1 The second extra argument.
9023 */
9024#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9025 do { \
9026 iemFpuPrepareUsageSse(pIemCpu); \
9027 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
9028 } while (0)
9029
9030/**
9031 * Calls a SSE assembly implementation taking three visible arguments.
9032 *
9033 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9034 * @param a0 The first extra argument.
9035 * @param a1 The second extra argument.
9036 * @param a2 The third extra argument.
9037 */
9038#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9039 do { \
9040 iemFpuPrepareUsageSse(pIemCpu); \
9041 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
9042 } while (0)
9043
9044
9045/** @note Not for IOPL or IF testing. */
9046#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9047/** @note Not for IOPL or IF testing. */
9048#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9049/** @note Not for IOPL or IF testing. */
9050#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9051/** @note Not for IOPL or IF testing. */
9052#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9053/** @note Not for IOPL or IF testing. */
9054#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9055 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9056 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9057/** @note Not for IOPL or IF testing. */
9058#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9059 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9060 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9061/** @note Not for IOPL or IF testing. */
9062#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9063 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9064 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9065 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9066/** @note Not for IOPL or IF testing. */
9067#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9068 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9069 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9070 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9071#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9072#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9073#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9074/** @note Not for IOPL or IF testing. */
9075#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9076 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9077 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9078/** @note Not for IOPL or IF testing. */
9079#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9080 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9081 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9082/** @note Not for IOPL or IF testing. */
9083#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9084 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9085 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9086/** @note Not for IOPL or IF testing. */
9087#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9088 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9089 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9090/** @note Not for IOPL or IF testing. */
9091#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9092 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9093 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9094/** @note Not for IOPL or IF testing. */
9095#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9096 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9097 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9098#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9099#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9100#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9101 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9102#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9103 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9104#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9105 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9106#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9107 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9108#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9109 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9110#define IEM_MC_IF_FCW_IM() \
9111 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
9112
9113#define IEM_MC_ELSE() } else {
9114#define IEM_MC_ENDIF() } do {} while (0)
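/*
 * The IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF macros open and close plain C
 * blocks.  A conditional branch body sketch (simplified, hypothetical; real
 * Jcc decoders look much like this, with i8Imm being the decoded displacement):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *      IEM_MC_END();
 */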
9115
9116/** @} */
9117
9118
9119/** @name Opcode Debug Helpers.
9120 * @{
9121 */
9122#ifdef DEBUG
9123# define IEMOP_MNEMONIC(a_szMnemonic) \
9124 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9125 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9126# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9127 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9128 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9129#else
9130# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9131# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9132#endif
9133
9134/** @} */
9135
9136
9137/** @name Opcode Helpers.
9138 * @{
9139 */
9140
9141/** The instruction raises an \#UD in real and V8086 mode. */
9142#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9143 do \
9144 { \
9145 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9146 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9147 } while (0)
9148
9149/** The instruction allows no lock prefixing (in this encoding); throw \#UD if
9150 * lock prefixed.
9151 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9152#define IEMOP_HLP_NO_LOCK_PREFIX() \
9153 do \
9154 { \
9155 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9156 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9157 } while (0)
9158
9159/** The instruction is not available in 64-bit mode; throw \#UD if we're in
9160 * 64-bit mode. */
9161#define IEMOP_HLP_NO_64BIT() \
9162 do \
9163 { \
9164 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9165 return IEMOP_RAISE_INVALID_OPCODE(); \
9166 } while (0)
9167
9168/** The instruction is only available in 64-bit mode; throw \#UD if we're not in
9169 * 64-bit mode. */
9170#define IEMOP_HLP_ONLY_64BIT() \
9171 do \
9172 { \
9173 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9174 return IEMOP_RAISE_INVALID_OPCODE(); \
9175 } while (0)
9176
9177/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9178#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9179 do \
9180 { \
9181 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9182 iemRecalEffOpSize64Default(pIemCpu); \
9183 } while (0)
9184
9185/** The instruction has 64-bit operand size if 64-bit mode. */
9186#define IEMOP_HLP_64BIT_OP_SIZE() \
9187 do \
9188 { \
9189 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9190 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9191 } while (0)
9192
9193/** Only a REX prefix immediately preceding the first opcode byte takes
9194 * effect. This macro helps ensure this and logs bad guest code. */
9195#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9196 do \
9197 { \
9198 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9199 { \
9200 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9201 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9202 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9203 pIemCpu->uRexB = 0; \
9204 pIemCpu->uRexIndex = 0; \
9205 pIemCpu->uRexReg = 0; \
9206 iemRecalEffOpSize(pIemCpu); \
9207 } \
9208 } while (0)
9209
9210/**
9211 * Done decoding.
9212 */
9213#define IEMOP_HLP_DONE_DECODING() \
9214 do \
9215 { \
9216 /*nothing for now, maybe later... */ \
9217 } while (0)
9218
9219/**
9220 * Done decoding, raise \#UD exception if lock prefix present.
9221 */
9222#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9223 do \
9224 { \
9225 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9226 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9227 } while (0)
9228#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9229 do \
9230 { \
9231 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9232 { \
9233 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9234 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9235 } \
9236 } while (0)
9237#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9238 do \
9239 { \
9240 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9241 { \
9242 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9243 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9244 } \
9245 } while (0)
9246
9247
9248/**
9249 * Calculates the effective address of a ModR/M memory operand.
9250 *
9251 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9252 *
9253 * @return Strict VBox status code.
9254 * @param pIemCpu The IEM per CPU data.
9255 * @param bRm The ModRM byte.
9256 * @param cbImm The size of any immediate following the
9257 * effective address opcode bytes. Important for
9258 * RIP relative addressing.
9259 * @param pGCPtrEff Where to return the effective address.
9260 */
9261static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9262{
9263 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9264 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9265#define SET_SS_DEF() \
9266 do \
9267 { \
9268 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9269 pIemCpu->iEffSeg = X86_SREG_SS; \
9270 } while (0)
9271
9272 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9273 {
9274/** @todo Check the effective address size crap! */
9275 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9276 {
9277 uint16_t u16EffAddr;
9278
9279 /* Handle the disp16 form with no registers first. */
9280 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9281 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9282 else
9283 {
9284 /* Get the displacement. */
9285 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9286 {
9287 case 0: u16EffAddr = 0; break;
9288 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9289 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9290 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9291 }
9292
9293 /* Add the base and index registers to the disp. */
9294 switch (bRm & X86_MODRM_RM_MASK)
9295 {
9296 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9297 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9298 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9299 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9300 case 4: u16EffAddr += pCtx->si; break;
9301 case 5: u16EffAddr += pCtx->di; break;
9302 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9303 case 7: u16EffAddr += pCtx->bx; break;
9304 }
9305 }
9306
9307 *pGCPtrEff = u16EffAddr;
9308 }
9309 else
9310 {
9311 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9312 uint32_t u32EffAddr;
9313
9314 /* Handle the disp32 form with no registers first. */
9315 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9316 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9317 else
9318 {
9319 /* Get the register (or SIB) value. */
9320 switch ((bRm & X86_MODRM_RM_MASK))
9321 {
9322 case 0: u32EffAddr = pCtx->eax; break;
9323 case 1: u32EffAddr = pCtx->ecx; break;
9324 case 2: u32EffAddr = pCtx->edx; break;
9325 case 3: u32EffAddr = pCtx->ebx; break;
9326 case 4: /* SIB */
9327 {
9328 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9329
9330 /* Get the index and scale it. */
9331 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9332 {
9333 case 0: u32EffAddr = pCtx->eax; break;
9334 case 1: u32EffAddr = pCtx->ecx; break;
9335 case 2: u32EffAddr = pCtx->edx; break;
9336 case 3: u32EffAddr = pCtx->ebx; break;
9337 case 4: u32EffAddr = 0; /*none */ break;
9338 case 5: u32EffAddr = pCtx->ebp; break;
9339 case 6: u32EffAddr = pCtx->esi; break;
9340 case 7: u32EffAddr = pCtx->edi; break;
9341 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9342 }
9343 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9344
9345 /* add base */
9346 switch (bSib & X86_SIB_BASE_MASK)
9347 {
9348 case 0: u32EffAddr += pCtx->eax; break;
9349 case 1: u32EffAddr += pCtx->ecx; break;
9350 case 2: u32EffAddr += pCtx->edx; break;
9351 case 3: u32EffAddr += pCtx->ebx; break;
9352 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9353 case 5:
9354 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9355 {
9356 u32EffAddr += pCtx->ebp;
9357 SET_SS_DEF();
9358 }
9359 else
9360 {
9361 uint32_t u32Disp;
9362 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9363 u32EffAddr += u32Disp;
9364 }
9365 break;
9366 case 6: u32EffAddr += pCtx->esi; break;
9367 case 7: u32EffAddr += pCtx->edi; break;
9368 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9369 }
9370 break;
9371 }
9372 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9373 case 6: u32EffAddr = pCtx->esi; break;
9374 case 7: u32EffAddr = pCtx->edi; break;
9375 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9376 }
9377
9378 /* Get and add the displacement. */
9379 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9380 {
9381 case 0:
9382 break;
9383 case 1:
9384 {
9385 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9386 u32EffAddr += i8Disp;
9387 break;
9388 }
9389 case 2:
9390 {
9391 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9392 u32EffAddr += u32Disp;
9393 break;
9394 }
9395 default:
9396 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
9397 }
9398
9399 }
9400 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9401 *pGCPtrEff = u32EffAddr;
9402 else
9403 {
9404 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9405 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9406 }
9407 }
9408 }
9409 else
9410 {
9411 uint64_t u64EffAddr;
9412
9413 /* Handle the rip+disp32 form with no registers first. */
9414 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9415 {
9416 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9417 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9418 }
9419 else
9420 {
9421 /* Get the register (or SIB) value. */
9422 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9423 {
9424 case 0: u64EffAddr = pCtx->rax; break;
9425 case 1: u64EffAddr = pCtx->rcx; break;
9426 case 2: u64EffAddr = pCtx->rdx; break;
9427 case 3: u64EffAddr = pCtx->rbx; break;
9428 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9429 case 6: u64EffAddr = pCtx->rsi; break;
9430 case 7: u64EffAddr = pCtx->rdi; break;
9431 case 8: u64EffAddr = pCtx->r8; break;
9432 case 9: u64EffAddr = pCtx->r9; break;
9433 case 10: u64EffAddr = pCtx->r10; break;
9434 case 11: u64EffAddr = pCtx->r11; break;
9435 case 13: u64EffAddr = pCtx->r13; break;
9436 case 14: u64EffAddr = pCtx->r14; break;
9437 case 15: u64EffAddr = pCtx->r15; break;
9438 /* SIB */
9439 case 4:
9440 case 12:
9441 {
9442 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9443
9444 /* Get the index and scale it. */
9445 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9446 {
9447 case 0: u64EffAddr = pCtx->rax; break;
9448 case 1: u64EffAddr = pCtx->rcx; break;
9449 case 2: u64EffAddr = pCtx->rdx; break;
9450 case 3: u64EffAddr = pCtx->rbx; break;
9451 case 4: u64EffAddr = 0; /*none */ break;
9452 case 5: u64EffAddr = pCtx->rbp; break;
9453 case 6: u64EffAddr = pCtx->rsi; break;
9454 case 7: u64EffAddr = pCtx->rdi; break;
9455 case 8: u64EffAddr = pCtx->r8; break;
9456 case 9: u64EffAddr = pCtx->r9; break;
9457 case 10: u64EffAddr = pCtx->r10; break;
9458 case 11: u64EffAddr = pCtx->r11; break;
9459 case 12: u64EffAddr = pCtx->r12; break;
9460 case 13: u64EffAddr = pCtx->r13; break;
9461 case 14: u64EffAddr = pCtx->r14; break;
9462 case 15: u64EffAddr = pCtx->r15; break;
9463 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9464 }
9465 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9466
9467 /* add base */
9468 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9469 {
9470 case 0: u64EffAddr += pCtx->rax; break;
9471 case 1: u64EffAddr += pCtx->rcx; break;
9472 case 2: u64EffAddr += pCtx->rdx; break;
9473 case 3: u64EffAddr += pCtx->rbx; break;
9474 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9475 case 6: u64EffAddr += pCtx->rsi; break;
9476 case 7: u64EffAddr += pCtx->rdi; break;
9477 case 8: u64EffAddr += pCtx->r8; break;
9478 case 9: u64EffAddr += pCtx->r9; break;
9479 case 10: u64EffAddr += pCtx->r10; break;
9480 case 11: u64EffAddr += pCtx->r11; break;
9481 case 12: u64EffAddr += pCtx->r12; break;
9482 case 14: u64EffAddr += pCtx->r14; break;
9483 case 15: u64EffAddr += pCtx->r15; break;
9484 /* complicated encodings */
9485 case 5:
9486 case 13:
9487 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9488 {
9489 if (!pIemCpu->uRexB)
9490 {
9491 u64EffAddr += pCtx->rbp;
9492 SET_SS_DEF();
9493 }
9494 else
9495 u64EffAddr += pCtx->r13;
9496 }
9497 else
9498 {
9499 uint32_t u32Disp;
9500 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9501 u64EffAddr += (int32_t)u32Disp;
9502 }
9503 break;
9504 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9505 }
9506 break;
9507 }
9508 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9509 }
9510
9511 /* Get and add the displacement. */
9512 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9513 {
9514 case 0:
9515 break;
9516 case 1:
9517 {
9518 int8_t i8Disp;
9519 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9520 u64EffAddr += i8Disp;
9521 break;
9522 }
9523 case 2:
9524 {
9525 uint32_t u32Disp;
9526 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9527 u64EffAddr += (int32_t)u32Disp;
9528 break;
9529 }
9530 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9531 }
9532
9533 }
9534
9535 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9536 *pGCPtrEff = u64EffAddr;
9537 else
9538 {
9539 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9540 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9541 }
9542 }
9543
9544 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9545 return VINF_SUCCESS;
9546}
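/* Worked examples for iemOpHlpCalcRmEffAddr (illustrative only):
 *   16-bit: bRm=42h (mod=1, rm=2) decodes to [bp+si+disp8] with SS as the
 *           default segment (SET_SS_DEF).
 *   32-bit: bRm=84h (mod=2, rm=4) means a SIB byte follows; with bSib=88h
 *           (scale field 2, i.e. *4, index=ecx, base=eax) the address becomes
 *           eax + ecx*4 + disp32.
 */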
9547
9548/** @} */
9549
9550
9551
9552/*
9553 * Include the instructions
9554 */
9555#include "IEMAllInstructions.cpp.h"
9556
9557
9558
9559
9560#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9561
9562/**
9563 * Sets up execution verification mode.
9564 */
9565static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9566{
9567 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9568 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9569
9570 /*
9571 * Always note down the address of the current instruction.
9572 */
9573 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9574 pIemCpu->uOldRip = pOrgCtx->rip;
9575
9576 /*
9577 * Enable verification and/or logging.
9578 */
9579 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9580 if ( fNewNoRem
9581 && ( 0
9582#if 0 /* auto enable on first paged protected mode interrupt */
9583 || ( pOrgCtx->eflags.Bits.u1IF
9584 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9585 && TRPMHasTrap(pVCpu)
9586 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9587#endif
9588#if 0
9589 || ( pOrgCtx->cs.Sel == 0x10
9590 && ( pOrgCtx->rip == 0x90119e3e
9591 || pOrgCtx->rip == 0x901d9810) )
9592#endif
9593#if 0 /* Auto enable DSL - FPU stuff. */
9594 || ( pOrgCtx->cs.Sel == 0x10
9595 && (// pOrgCtx->rip == 0xc02ec07f
9596 //|| pOrgCtx->rip == 0xc02ec082
9597 //|| pOrgCtx->rip == 0xc02ec0c9
9598 0
9599 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9600#endif
9601#if 0 /* Auto enable DSL - fstp st0 stuff. */
9602 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9603#endif
9604#if 0
9605 || pOrgCtx->rip == 0x9022bb3a
9606#endif
9607#if 0
9608 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9609#endif
9610#if 0
9611 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9612 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9613#endif
9614#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9615 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9616 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9617 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9618#endif
9619#if 0 /* NT4SP1 - xadd early boot. */
9620 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9621#endif
9622#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9623 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9624#endif
9625#if 0 /* NT4SP1 - cmpxchg (AMD). */
9626 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9627#endif
9628#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9629 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9630#endif
9631#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9632 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9633
9634#endif
9635#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9636 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9637
9638#endif
9639#if 0 /* NT4SP1 - frstor [ecx] */
9640 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9641#endif
9642#if 0 /* xxxxxx - All long mode code. */
9643 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9644#endif
9645#if 0 /* rep movsq linux 3.7 64-bit boot. */
9646 || (pOrgCtx->rip == 0x0000000000100241)
9647#endif
9648#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9649 || (pOrgCtx->rip == 0x000000000215e240)
9650#endif
9651#if 0 /* DOS's size-overridden iret to v8086. */
9652 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9653#endif
9654 )
9655 )
9656 {
9657 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9658 RTLogFlags(NULL, "enabled");
9659 fNewNoRem = false;
9660 }
9661 if (fNewNoRem != pIemCpu->fNoRem)
9662 {
9663 pIemCpu->fNoRem = fNewNoRem;
9664 if (!fNewNoRem)
9665 {
9666 LogAlways(("Enabling verification mode!\n"));
9667 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9668 }
9669 else
9670 LogAlways(("Disabling verification mode!\n"));
9671 }
9672
9673 /*
9674 * Switch state.
9675 */
9676 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9677 {
9678 static CPUMCTX s_DebugCtx; /* Ugly! */
9679
9680 s_DebugCtx = *pOrgCtx;
9681 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9682 }
9683
9684 /*
9685 * See if there is an interrupt pending in TRPM and inject it if we can.
9686 */
9687 pIemCpu->uInjectCpl = UINT8_MAX;
9688 if ( pOrgCtx->eflags.Bits.u1IF
9689 && TRPMHasTrap(pVCpu)
9690 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9691 {
9692 uint8_t u8TrapNo;
9693 TRPMEVENT enmType;
9694 RTGCUINT uErrCode;
9695 RTGCPTR uCr2;
9696 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9697 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9698 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9699 TRPMResetTrap(pVCpu);
9700 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9701 }
9702
9703 /*
9704 * Reset the counters.
9705 */
9706 pIemCpu->cIOReads = 0;
9707 pIemCpu->cIOWrites = 0;
9708 pIemCpu->fIgnoreRaxRdx = false;
9709 pIemCpu->fOverlappingMovs = false;
9710 pIemCpu->fProblematicMemory = false;
9711 pIemCpu->fUndefinedEFlags = 0;
9712
9713 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9714 {
9715 /*
9716 * Free all verification records.
9717 */
9718 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9719 pIemCpu->pIemEvtRecHead = NULL;
9720 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9721 do
9722 {
9723 while (pEvtRec)
9724 {
9725 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9726 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9727 pIemCpu->pFreeEvtRec = pEvtRec;
9728 pEvtRec = pNext;
9729 }
9730 pEvtRec = pIemCpu->pOtherEvtRecHead;
9731 pIemCpu->pOtherEvtRecHead = NULL;
9732 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9733 } while (pEvtRec);
9734 }
9735}
9736
9737
9738/**
9739 * Allocate an event record.
9740 * @returns Pointer to a record.
9741 */
9742static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9743{
9744 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9745 return NULL;
9746
9747 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9748 if (pEvtRec)
9749 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9750 else
9751 {
9752 if (!pIemCpu->ppIemEvtRecNext)
9753 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9754
9755 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9756 if (!pEvtRec)
9757 return NULL;
9758 }
9759 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9760 pEvtRec->pNext = NULL;
9761 return pEvtRec;
9762}
9763
9764
9765/**
9766 * IOMMMIORead notification.
9767 */
9768VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9769{
9770 PVMCPU pVCpu = VMMGetCpu(pVM);
9771 if (!pVCpu)
9772 return;
9773 PIEMCPU pIemCpu = &pVCpu->iem.s;
9774 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9775 if (!pEvtRec)
9776 return;
9777 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9778 pEvtRec->u.RamRead.GCPhys = GCPhys;
9779 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9780 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9781 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9782}
9783
9784
9785/**
9786 * IOMMMIOWrite notification.
9787 */
9788VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9789{
9790 PVMCPU pVCpu = VMMGetCpu(pVM);
9791 if (!pVCpu)
9792 return;
9793 PIEMCPU pIemCpu = &pVCpu->iem.s;
9794 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9795 if (!pEvtRec)
9796 return;
9797 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9798 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9799 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9800 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9801 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9802 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9803 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9804 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9805 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9806}
9807
9808
9809/**
9810 * IOMIOPortRead notification.
9811 */
9812VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9813{
9814 PVMCPU pVCpu = VMMGetCpu(pVM);
9815 if (!pVCpu)
9816 return;
9817 PIEMCPU pIemCpu = &pVCpu->iem.s;
9818 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9819 if (!pEvtRec)
9820 return;
9821 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9822 pEvtRec->u.IOPortRead.Port = Port;
9823 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9824 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9825 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9826}
9827
9828/**
9829 * IOMIOPortWrite notification.
9830 */
9831VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9832{
9833 PVMCPU pVCpu = VMMGetCpu(pVM);
9834 if (!pVCpu)
9835 return;
9836 PIEMCPU pIemCpu = &pVCpu->iem.s;
9837 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9838 if (!pEvtRec)
9839 return;
9840 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9841 pEvtRec->u.IOPortWrite.Port = Port;
9842 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9843 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9844 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9845 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9846}
9847
9848
9849VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
9850{
9851 AssertFailed();
9852}
9853
9854
9855VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
9856{
9857 AssertFailed();
9858}
9859
9860
9861/**
9862 * Fakes and records an I/O port read.
9863 *
9864 * @returns VINF_SUCCESS.
9865 * @param pIemCpu The IEM per CPU data.
9866 * @param Port The I/O port.
9867 * @param pu32Value Where to store the fake value.
9868 * @param cbValue The size of the access.
9869 */
9870static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
9871{
9872 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9873 if (pEvtRec)
9874 {
9875 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9876 pEvtRec->u.IOPortRead.Port = Port;
9877 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9878 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9879 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9880 }
9881 pIemCpu->cIOReads++;
9882 *pu32Value = 0xcccccccc;
9883 return VINF_SUCCESS;
9884}
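/* Note (descriptive): the 0xcccccccc fake read value above is what
   iemVerifyWriteRecord() keys on when it fends off INS-style writes further
   down, since the real device value is unknown at this point. */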
9885
9886
9887/**
9888 * Fakes and records an I/O port write.
9889 *
9890 * @returns VINF_SUCCESS.
9891 * @param pIemCpu The IEM per CPU data.
9892 * @param Port The I/O port.
9893 * @param u32Value The value being written.
9894 * @param cbValue The size of the access.
9895 */
9896static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9897{
9898 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9899 if (pEvtRec)
9900 {
9901 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
9902 pEvtRec->u.IOPortWrite.Port = Port;
9903 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
9904 pEvtRec->u.IOPortWrite.u32Value = u32Value;
9905 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
9906 *pIemCpu->ppIemEvtRecNext = pEvtRec;
9907 }
9908 pIemCpu->cIOWrites++;
9909 return VINF_SUCCESS;
9910}
9911
9912
9913/**
9914 * Used to add extra details about a stub case.
9915 * @param pIemCpu The IEM per CPU state.
9916 */
9917static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
9918{
9919 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9920 PVM pVM = IEMCPU_TO_VM(pIemCpu);
9921 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9922 char szRegs[4096];
9923 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
9924 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
9925 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
9926 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
9927 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
9928 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
9929 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
9930 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
9931 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
9932 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
9933 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
9934 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
9935 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
9936 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
9937 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
9938 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
9939 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
9940 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
9941 " efer=%016VR{efer}\n"
9942 " pat=%016VR{pat}\n"
9943 " sf_mask=%016VR{sf_mask}\n"
9944 "krnl_gs_base=%016VR{krnl_gs_base}\n"
9945 " lstar=%016VR{lstar}\n"
9946 " star=%016VR{star} cstar=%016VR{cstar}\n"
9947 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
9948 );
9949
9950 char szInstr1[256];
9951 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
9952 DBGF_DISAS_FLAGS_DEFAULT_MODE,
9953 szInstr1, sizeof(szInstr1), NULL);
9954 char szInstr2[256];
9955 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
9956 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9957 szInstr2, sizeof(szInstr2), NULL);
9958
9959 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
9960}
9961
9962
9963/**
9964 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
9965 * dump to the assertion info.
9966 *
9967 * @param pEvtRec The record to dump.
9968 */
9969static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
9970{
9971 switch (pEvtRec->enmEvent)
9972 {
9973 case IEMVERIFYEVENT_IOPORT_READ:
9974 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
9975 pEvtRec->u.IOPortRead.Port,
9976 pEvtRec->u.IOPortRead.cbValue);
9977 break;
9978 case IEMVERIFYEVENT_IOPORT_WRITE:
9979 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
9980 pEvtRec->u.IOPortWrite.Port,
9981 pEvtRec->u.IOPortWrite.cbValue,
9982 pEvtRec->u.IOPortWrite.u32Value);
9983 break;
9984 case IEMVERIFYEVENT_RAM_READ:
9985 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
9986 pEvtRec->u.RamRead.GCPhys,
9987 pEvtRec->u.RamRead.cb);
9988 break;
9989 case IEMVERIFYEVENT_RAM_WRITE:
9990 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
9991 pEvtRec->u.RamWrite.GCPhys,
9992 pEvtRec->u.RamWrite.cb,
9993 (int)pEvtRec->u.RamWrite.cb,
9994 pEvtRec->u.RamWrite.ab);
9995 break;
9996 default:
9997 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
9998 break;
9999 }
10000}
10001
10002
10003/**
10004 * Raises an assertion on the specified records, showing the given message with
10005 * a record dump attached.
10006 *
10007 * @param pIemCpu The IEM per CPU data.
10008 * @param pEvtRec1 The first record.
10009 * @param pEvtRec2 The second record.
10010 * @param pszMsg The message explaining why we're asserting.
10011 */
10012static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10013{
10014 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10015 iemVerifyAssertAddRecordDump(pEvtRec1);
10016 iemVerifyAssertAddRecordDump(pEvtRec2);
10017 iemVerifyAssertMsg2(pIemCpu);
10018 RTAssertPanic();
10019}
10020
10021
10022/**
10023 * Raises an assertion on the specified record, showing the given message with
10024 * a record dump attached.
10025 *
10026 * @param pIemCpu The IEM per CPU data.
10027 * @param pEvtRec The record.
10028 * @param pszMsg The message explaining why we're asserting.
10029 */
10030static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10031{
10032 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10033 iemVerifyAssertAddRecordDump(pEvtRec);
10034 iemVerifyAssertMsg2(pIemCpu);
10035 RTAssertPanic();
10036}
10037
10038
10039/**
10040 * Verifies a write record.
10041 *
10042 * @param pIemCpu The IEM per CPU data.
10043 * @param pEvtRec The write record.
10044 * @param fRem Set if REM was doing the other execution. If clear
10045 * it was HM.
10046 */
10047static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10048{
10049 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10050 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10051 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10052 if ( RT_FAILURE(rc)
10053 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10054 {
10055 /* fend off ins */
10056 if ( !pIemCpu->cIOReads
10057 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10058 || ( pEvtRec->u.RamWrite.cb != 1
10059 && pEvtRec->u.RamWrite.cb != 2
10060 && pEvtRec->u.RamWrite.cb != 4) )
10061 {
10062 /* fend off ROMs and MMIO */
10063 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10064 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10065 {
10066 /* fend off fxsave */
10067 if (pEvtRec->u.RamWrite.cb != 512)
10068 {
10069 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10070 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10071 RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
10072 RTAssertMsg2Add("%s: %.*Rhxs\n"
10073 "iem: %.*Rhxs\n",
10074 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10075 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10076 iemVerifyAssertAddRecordDump(pEvtRec);
10077 iemVerifyAssertMsg2(pIemCpu);
10078 RTAssertPanic();
10079 }
10080 }
10081 }
10082 }
10083
10084}
10085
10086/**
10087 * Performs the post-execution verification checks.
10088 */
10089static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10090{
10091 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10092 return;
10093
10094 /*
10095 * Switch back the state.
10096 */
10097 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10098 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10099 Assert(pOrgCtx != pDebugCtx);
10100 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10101
10102 /*
10103 * Execute the instruction in REM.
10104 */
10105 bool fRem = false;
10106 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10107 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10108 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10109#ifdef IEM_VERIFICATION_MODE_FULL_HM
10110 if ( HMIsEnabled(pVM)
10111 && pIemCpu->cIOReads == 0
10112 && pIemCpu->cIOWrites == 0
10113 && !pIemCpu->fProblematicMemory)
10114 {
10115 unsigned iLoops = 0;
10116 do
10117 {
10118 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10119 iLoops++;
10120 } while ( rc == VINF_SUCCESS
10121 || ( rc == VINF_EM_DBG_STEPPED
10122 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10123 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10124 || ( pOrgCtx->rip != pDebugCtx->rip
10125 && pIemCpu->uInjectCpl != UINT8_MAX
10126 && iLoops < 8) );
10127 }
10128#endif
10129 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10130 || rc == VINF_IOM_R3_IOPORT_READ
10131 || rc == VINF_IOM_R3_IOPORT_WRITE
10132 || rc == VINF_IOM_R3_MMIO_READ
10133 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10134 || rc == VINF_IOM_R3_MMIO_WRITE
10135 )
10136 {
10137 EMRemLock(pVM);
10138 rc = REMR3EmulateInstruction(pVM, pVCpu);
10139 AssertRC(rc);
10140 EMRemUnlock(pVM);
10141 fRem = true;
10142 }
10143
10144 /*
10145 * Compare the register states.
10146 */
10147 unsigned cDiffs = 0;
10148 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10149 {
10150 //Log(("REM and IEM ends up with different registers!\n"));
10151 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10152
10153# define CHECK_FIELD(a_Field) \
10154 do \
10155 { \
10156 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10157 { \
10158 switch (sizeof(pOrgCtx->a_Field)) \
10159 { \
10160 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10161 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10162 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10163 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10164 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10165 } \
10166 cDiffs++; \
10167 } \
10168 } while (0)
10169
10170# define CHECK_BIT_FIELD(a_Field) \
10171 do \
10172 { \
10173 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10174 { \
10175 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10176 cDiffs++; \
10177 } \
10178 } while (0)
10179
10180# define CHECK_SEL(a_Sel) \
10181 do \
10182 { \
10183 CHECK_FIELD(a_Sel.Sel); \
10184 CHECK_FIELD(a_Sel.Attr.u); \
10185 CHECK_FIELD(a_Sel.u64Base); \
10186 CHECK_FIELD(a_Sel.u32Limit); \
10187 CHECK_FIELD(a_Sel.fFlags); \
10188 } while (0)
10189
10190#if 1 /* The recompiler doesn't update these the intel way. */
10191 if (fRem)
10192 {
10193 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
10194 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
10195 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
10196 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
10197 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
10198 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
10199 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
10200 //pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK;
10201 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
10202 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
10203 }
10204#endif
10205 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
10206 {
10207 RTAssertMsg2Weak(" the FPU state differs\n");
10208 cDiffs++;
10209 CHECK_FIELD(fpu.FCW);
10210 CHECK_FIELD(fpu.FSW);
10211 CHECK_FIELD(fpu.FTW);
10212 CHECK_FIELD(fpu.FOP);
10213 CHECK_FIELD(fpu.FPUIP);
10214 CHECK_FIELD(fpu.CS);
10215 CHECK_FIELD(fpu.Rsrvd1);
10216 CHECK_FIELD(fpu.FPUDP);
10217 CHECK_FIELD(fpu.DS);
10218 CHECK_FIELD(fpu.Rsrvd2);
10219 CHECK_FIELD(fpu.MXCSR);
10220 CHECK_FIELD(fpu.MXCSR_MASK);
10221 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
10222 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
10223 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
10224 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
10225 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
10226 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
10227 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
10228 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
10229 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
10230 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
10231 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
10232 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
10233 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
10234 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
10235 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
10236 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
10237 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
10238 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
10239 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
10240 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
10241 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
10242 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
10243 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
10244 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
10245 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
10246 CHECK_FIELD(fpu.au32RsrvdRest[i]);
10247 }
10248 CHECK_FIELD(rip);
10249 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10250 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10251 {
10252 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10253 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10254 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10255 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10256 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10257 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10258 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10259 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10260 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10261 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10262 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10263 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10264 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10265 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10266 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10267 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10268 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
10269 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10270 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10271 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10272 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10273 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10274 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10275 }
10276
10277 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10278 CHECK_FIELD(rax);
10279 CHECK_FIELD(rcx);
10280 if (!pIemCpu->fIgnoreRaxRdx)
10281 CHECK_FIELD(rdx);
10282 CHECK_FIELD(rbx);
10283 CHECK_FIELD(rsp);
10284 CHECK_FIELD(rbp);
10285 CHECK_FIELD(rsi);
10286 CHECK_FIELD(rdi);
10287 CHECK_FIELD(r8);
10288 CHECK_FIELD(r9);
10289 CHECK_FIELD(r10);
10290 CHECK_FIELD(r11);
10291 CHECK_FIELD(r12);
10292 CHECK_FIELD(r13);
10293 CHECK_SEL(cs);
10294 CHECK_SEL(ss);
10295 CHECK_SEL(ds);
10296 CHECK_SEL(es);
10297 CHECK_SEL(fs);
10298 CHECK_SEL(gs);
10299 CHECK_FIELD(cr0);
10300
10301 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10302 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10303 /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
10304 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10305 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10306 {
10307 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10308 { /* ignore */ }
10309 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10310 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10311 && fRem)
10312 { /* ignore */ }
10313 else
10314 CHECK_FIELD(cr2);
10315 }
10316 CHECK_FIELD(cr3);
10317 CHECK_FIELD(cr4);
10318 CHECK_FIELD(dr[0]);
10319 CHECK_FIELD(dr[1]);
10320 CHECK_FIELD(dr[2]);
10321 CHECK_FIELD(dr[3]);
10322 CHECK_FIELD(dr[6]);
10323 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10324 CHECK_FIELD(dr[7]);
10325 CHECK_FIELD(gdtr.cbGdt);
10326 CHECK_FIELD(gdtr.pGdt);
10327 CHECK_FIELD(idtr.cbIdt);
10328 CHECK_FIELD(idtr.pIdt);
10329 CHECK_SEL(ldtr);
10330 CHECK_SEL(tr);
10331 CHECK_FIELD(SysEnter.cs);
10332 CHECK_FIELD(SysEnter.eip);
10333 CHECK_FIELD(SysEnter.esp);
10334 CHECK_FIELD(msrEFER);
10335 CHECK_FIELD(msrSTAR);
10336 CHECK_FIELD(msrPAT);
10337 CHECK_FIELD(msrLSTAR);
10338 CHECK_FIELD(msrCSTAR);
10339 CHECK_FIELD(msrSFMASK);
10340 CHECK_FIELD(msrKERNELGSBASE);
10341
10342 if (cDiffs != 0)
10343 {
10344 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10345 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10346 iemVerifyAssertMsg2(pIemCpu);
10347 RTAssertPanic();
10348 }
10349# undef CHECK_FIELD
10350# undef CHECK_BIT_FIELD
10351 }
10352
10353 /*
10354 * If the register state compared fine, check the verification event
10355 * records.
10356 */
10357 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10358 {
10359 /*
10360 * Compare verification event records.
10361 * - I/O port accesses should be a 1:1 match.
10362 */
10363 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10364 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10365 while (pIemRec && pOtherRec)
10366 {
10367 /* Since we might miss RAM writes and reads, ignore reads and check
10368 any extra written memory directly against the guest RAM contents. */
10369 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10370 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10371 && pIemRec->pNext)
10372 {
10373 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10374 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10375 pIemRec = pIemRec->pNext;
10376 }
10377
10378 /* Do the compare. */
10379 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10380 {
10381 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10382 break;
10383 }
10384 bool fEquals;
10385 switch (pIemRec->enmEvent)
10386 {
10387 case IEMVERIFYEVENT_IOPORT_READ:
10388 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10389 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10390 break;
10391 case IEMVERIFYEVENT_IOPORT_WRITE:
10392 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10393 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10394 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10395 break;
10396 case IEMVERIFYEVENT_RAM_READ:
10397 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10398 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10399 break;
10400 case IEMVERIFYEVENT_RAM_WRITE:
10401 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10402 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10403 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10404 break;
10405 default:
10406 fEquals = false;
10407 break;
10408 }
10409 if (!fEquals)
10410 {
10411 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10412 break;
10413 }
10414
10415 /* advance */
10416 pIemRec = pIemRec->pNext;
10417 pOtherRec = pOtherRec->pNext;
10418 }
10419
10420 /* Ignore extra writes and reads. */
10421 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10422 {
10423 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10424 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10425 pIemRec = pIemRec->pNext;
10426 }
10427 if (pIemRec != NULL)
10428 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10429 else if (pOtherRec != NULL)
10430 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10431 }
10432 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10433}
10434
10435#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10436
10437/* stubs */
10438static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10439{
10440 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10441 return VERR_INTERNAL_ERROR;
10442}
10443
10444static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10445{
10446 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10447 return VERR_INTERNAL_ERROR;
10448}
10449
10450#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10451
10452
10453#ifdef LOG_ENABLED
10454/**
10455 * Logs the current instruction.
10456 * @param pVCpu The cross context virtual CPU structure of the caller.
10457 * @param pCtx The current CPU context.
10458 * @param fSameCtx Set if we have the same context information as the VMM,
10459 * clear if we may have already executed an instruction in
10460 * our debug context. When clear, we assume IEMCPU holds
10461 * valid CPU mode info.
10462 */
10463static void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10464{
10465# ifdef IN_RING3
10466 if (LogIs2Enabled())
10467 {
10468 char szInstr[256];
10469 uint32_t cbInstr = 0;
10470 if (fSameCtx)
10471 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10472 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10473 szInstr, sizeof(szInstr), &cbInstr);
10474 else
10475 {
10476 uint32_t fFlags = 0;
10477 switch (pVCpu->iem.s.enmCpuMode)
10478 {
10479 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10480 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10481 case IEMMODE_16BIT:
10482 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10483 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10484 else
10485 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10486 break;
10487 }
10488 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10489 szInstr, sizeof(szInstr), &cbInstr);
10490 }
10491
10492 Log2(("****\n"
10493 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10494 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10495 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10496 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10497 " %s\n"
10498 ,
10499 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10500 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10501 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10502 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10503 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
10504 szInstr));
10505
10506 if (LogIs3Enabled())
10507 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10508 }
10509 else
10510# endif
10511 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10512 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10513}
10514#endif
10515
10516
10517/**
10518 * Makes status code adjustments (pass up from I/O and access handlers)
10519 * as well as maintaining statistics.
10520 *
10521 * @returns Strict VBox status code to pass up.
10522 * @param pIemCpu The IEM per CPU data.
10523 * @param rcStrict The status from executing an instruction.
10524 */
10525DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10526{
10527 if (rcStrict != VINF_SUCCESS)
10528 {
10529 if (RT_SUCCESS(rcStrict))
10530 {
10531 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10532 || rcStrict == VINF_IOM_R3_IOPORT_READ
10533 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10534 || rcStrict == VINF_IOM_R3_MMIO_READ
10535 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10536 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10537 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10538 int32_t const rcPassUp = pIemCpu->rcPassUp;
10539 if (rcPassUp == VINF_SUCCESS)
10540 pIemCpu->cRetInfStatuses++;
10541 else if ( rcPassUp < VINF_EM_FIRST
10542 || rcPassUp > VINF_EM_LAST
10543 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10544 {
10545 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10546 pIemCpu->cRetPassUpStatus++;
10547 rcStrict = rcPassUp;
10548 }
10549 else
10550 {
10551 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10552 pIemCpu->cRetInfStatuses++;
10553 }
10554 }
10555 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10556 pIemCpu->cRetAspectNotImplemented++;
10557 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10558 pIemCpu->cRetInstrNotImplemented++;
10559#ifdef IEM_VERIFICATION_MODE_FULL
10560 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10561 rcStrict = VINF_SUCCESS;
10562#endif
10563 else
10564 pIemCpu->cRetErrStatuses++;
10565 }
10566 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10567 {
10568 pIemCpu->cRetPassUpStatus++;
10569 rcStrict = pIemCpu->rcPassUp;
10570 }
10571
10572 return rcStrict;
10573}
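/* Example of the fiddling above: if an instruction itself returned
   VINF_SUCCESS but a memory/I/O handler stashed VINF_IOM_R3_MMIO_WRITE in
   rcPassUp, the pass-up status is returned instead so that ring-3 can
   complete the access before the next instruction is executed. */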
10574
10575
10576/**
10577 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10578 * IEMExecOneWithPrefetchedByPC.
10579 *
10580 * @return Strict VBox status code.
10581 * @param pVCpu The current virtual CPU.
10582 * @param pIemCpu The IEM per CPU data.
10583 * @param fExecuteInhibit If set, execute the instruction following CLI,
10584 * POP SS and MOV SS,GR.
10585 */
10586DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10587{
10588 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10589 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10590 if (rcStrict == VINF_SUCCESS)
10591 pIemCpu->cInstructions++;
10592 if (pIemCpu->cActiveMappings > 0)
10593 iemMemRollback(pIemCpu);
10594//#ifdef DEBUG
10595// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10596//#endif
10597
10598 /* Execute the next instruction as well if a cli, pop ss or
10599 mov ss, Gr has just completed successfully. */
10600 if ( fExecuteInhibit
10601 && rcStrict == VINF_SUCCESS
10602 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10603 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10604 {
10605 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10606 if (rcStrict == VINF_SUCCESS)
10607 {
10608# ifdef LOG_ENABLED
10609 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10610# endif
10611 IEM_OPCODE_GET_NEXT_U8(&b);
10612 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10613 if (rcStrict == VINF_SUCCESS)
10614 pIemCpu->cInstructions++;
10615 if (pIemCpu->cActiveMappings > 0)
10616 iemMemRollback(pIemCpu);
10617 }
10618 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10619 }
10620
10621 /*
10622 * Return value fiddling, statistics and sanity assertions.
10623 */
10624 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10625
10626 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10627 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10628#if defined(IEM_VERIFICATION_MODE_FULL)
10629 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10630 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10631 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10632 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10633#endif
10634 return rcStrict;
10635}
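/* Example of the fExecuteInhibit handling above: a guest doing 'mov ss, ax'
   immediately followed by 'mov esp, ebx' gets both instructions executed
   back to back, so no interrupt can be delivered between the stack selector
   and stack pointer updates. */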
10636
10637
10638#ifdef IN_RC
10639/**
10640 * Re-enters raw-mode or ensures we return to ring-3.
10641 *
10642 * @returns rcStrict, maybe modified.
10643 * @param pIemCpu The IEM CPU structure.
10644 * @param pVCpu The cross context virtual CPU structure of the caller.
10645 * @param pCtx The current CPU context.
10646 * @param rcStrict The status code returned by the interpreter.
10647 */
10648DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10649{
10650 if (!pIemCpu->fInPatchCode)
10651 CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx));
10652 return rcStrict;
10653}
10654#endif
10655
10656
10657/**
10658 * Execute one instruction.
10659 *
10660 * @return Strict VBox status code.
10661 * @param pVCpu The current virtual CPU.
10662 */
10663VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10664{
10665 PIEMCPU pIemCpu = &pVCpu->iem.s;
10666
10667#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10668 iemExecVerificationModeSetup(pIemCpu);
10669#endif
10670#ifdef LOG_ENABLED
10671 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10672 iemLogCurInstr(pVCpu, pCtx, true);
10673#endif
10674
10675 /*
10676 * Do the decoding and emulation.
10677 */
10678 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10679 if (rcStrict == VINF_SUCCESS)
10680 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10681
10682#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10683 /*
10684 * Assert some sanity.
10685 */
10686 iemExecVerificationModeCheck(pIemCpu);
10687#endif
10688#ifdef IN_RC
10689 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10690#endif
10691 if (rcStrict != VINF_SUCCESS)
10692 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10693 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10694 return rcStrict;
10695}
10696
10697
10698VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10699{
10700 PIEMCPU pIemCpu = &pVCpu->iem.s;
10701 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10702 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10703
10704 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10705 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10706 if (rcStrict == VINF_SUCCESS)
10707 {
10708 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10709 if (pcbWritten)
10710 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10711 }
10712
10713#ifdef IN_RC
10714 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10715#endif
10716 return rcStrict;
10717}
10718
10719
10720VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10721 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10722{
10723 PIEMCPU pIemCpu = &pVCpu->iem.s;
10724 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10725 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10726
10727 VBOXSTRICTRC rcStrict;
10728 if ( cbOpcodeBytes
10729 && pCtx->rip == OpcodeBytesPC)
10730 {
10731 iemInitDecoder(pIemCpu, false);
10732 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10733 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10734 rcStrict = VINF_SUCCESS;
10735 }
10736 else
10737 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10738 if (rcStrict == VINF_SUCCESS)
10739 {
10740 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10741 }
10742
10743#ifdef IN_RC
10744 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10745#endif
10746 return rcStrict;
10747}
10748
10749
10750VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10751{
10752 PIEMCPU pIemCpu = &pVCpu->iem.s;
10753 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10754 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10755
10756 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10757 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10758 if (rcStrict == VINF_SUCCESS)
10759 {
10760 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10761 if (pcbWritten)
10762 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10763 }
10764
10765#ifdef IN_RC
10766 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10767#endif
10768 return rcStrict;
10769}
10770
10771
10772VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10773 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10774{
10775 PIEMCPU pIemCpu = &pVCpu->iem.s;
10776 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10777 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10778
10779 VBOXSTRICTRC rcStrict;
10780 if ( cbOpcodeBytes
10781 && pCtx->rip == OpcodeBytesPC)
10782 {
10783 iemInitDecoder(pIemCpu, true);
10784 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10785 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10786 rcStrict = VINF_SUCCESS;
10787 }
10788 else
10789 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10790 if (rcStrict == VINF_SUCCESS)
10791 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10792
10793#ifdef IN_RC
10794 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10795#endif
10796 return rcStrict;
10797}
10798
10799
10800VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
10801{
10802 PIEMCPU pIemCpu = &pVCpu->iem.s;
10803
10804 /*
10805 * See if there is an interrupt pending in TRPM and inject it if we can.
10806 */
10807#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
10808 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10809# ifdef IEM_VERIFICATION_MODE_FULL
10810 pIemCpu->uInjectCpl = UINT8_MAX;
10811# endif
10812 if ( pCtx->eflags.Bits.u1IF
10813 && TRPMHasTrap(pVCpu)
10814 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
10815 {
10816 uint8_t u8TrapNo;
10817 TRPMEVENT enmType;
10818 RTGCUINT uErrCode;
10819 RTGCPTR uCr2;
10820 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
10821 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10822 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10823 TRPMResetTrap(pVCpu);
10824 }
10825#else
10826 iemExecVerificationModeSetup(pIemCpu);
10827 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10828#endif
10829
10830 /*
10831 * Log the state.
10832 */
10833#ifdef LOG_ENABLED
10834 iemLogCurInstr(pVCpu, pCtx, true);
10835#endif
10836
10837 /*
10838 * Do the decoding and emulation.
10839 */
10840 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10841 if (rcStrict == VINF_SUCCESS)
10842 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10843
10844#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10845 /*
10846 * Assert some sanity.
10847 */
10848 iemExecVerificationModeCheck(pIemCpu);
10849#endif
10850
10851 /*
10852 * Maybe re-enter raw-mode and log.
10853 */
10854#ifdef IN_RC
10855 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10856#endif
10857 if (rcStrict != VINF_SUCCESS)
10858 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10859 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10860 return rcStrict;
10861}
10862
10863
10864
10865/**
10866 * Injects a trap, fault, abort, software interrupt or external interrupt.
10867 *
10868 * The parameter list matches TRPMQueryTrapAll pretty closely.
10869 *
10870 * @returns Strict VBox status code.
10871 * @param pVCpu The current virtual CPU.
10872 * @param u8TrapNo The trap number.
10873 * @param enmType What type is it (trap/fault/abort), software
10874 * interrupt or hardware interrupt.
10875 * @param uErrCode The error code if applicable.
10876 * @param uCr2 The CR2 value if applicable.
10877 * @param cbInstr The instruction length (only relevant for
10878 * software interrupts).
10879 */
10880VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10881 uint8_t cbInstr)
10882{
10883 iemInitDecoder(&pVCpu->iem.s, false);
10884#ifdef DBGFTRACE_ENABLED
10885 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10886 u8TrapNo, enmType, uErrCode, uCr2);
10887#endif
10888
10889 uint32_t fFlags;
10890 switch (enmType)
10891 {
10892 case TRPM_HARDWARE_INT:
10893 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10894 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10895 uErrCode = uCr2 = 0;
10896 break;
10897
10898 case TRPM_SOFTWARE_INT:
10899 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10900 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10901 uErrCode = uCr2 = 0;
10902 break;
10903
10904 case TRPM_TRAP:
10905 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10906 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10907 if (u8TrapNo == X86_XCPT_PF)
10908 fFlags |= IEM_XCPT_FLAGS_CR2;
10909 switch (u8TrapNo)
10910 {
10911 case X86_XCPT_DF:
10912 case X86_XCPT_TS:
10913 case X86_XCPT_NP:
10914 case X86_XCPT_SS:
10915 case X86_XCPT_PF:
10916 case X86_XCPT_AC:
10917 fFlags |= IEM_XCPT_FLAGS_ERR;
10918 break;
10919 }
10920 break;
10921
10922 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10923 }
10924
10925 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10926}
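/* Illustrative call (hedged example): reflecting a guest page fault picked up
   from TRPM could look like
   IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, uCr2, 0),
   with cbInstr zero since it is not a software interrupt. */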


/**
 * Injects the active TRPM event.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               Pointer to the VMCPU.
 */
VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
{
#ifndef IEM_IMPLEMENTS_TASKSWITCH
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
#else
    uint8_t     u8TrapNo;
    TRPMEVENT   enmType;
    RTGCUINT    uErrCode;
    RTGCUINTPTR uCr2;
    uint8_t     cbInstr;
    int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
    if (RT_FAILURE(rc))
        return rc;

    TRPMResetTrap(pVCpu);
    return IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
#endif
}


/**
 * Sets a breakpoint (not implemented).
 *
 * @returns VBox status code (currently VERR_NOT_IMPLEMENTED).
 * @param   pVM                 Pointer to the VM.
 * @param   GCPtrBp             The guest address of the breakpoint.
 */
VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
{
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Clears a breakpoint (not implemented).
 *
 * @returns VBox status code (currently VERR_NOT_IMPLEMENTED).
 * @param   pVM                 Pointer to the VM.
 * @param   GCPtrBp             The guest address of the breakpoint.
 */
VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
{
    return VERR_NOT_IMPLEMENTED;
}

#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
/**
 * Executes an IRET instruction with default operand size.
 *
 * This is for PATM.
 *
 * @returns VBox status code.
 * @param   pVCpu               The current virtual CPU.
 * @param   pCtxCore            The register frame.
 */
VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    PIEMCPU  pIemCpu = &pVCpu->iem.s;
    PCPUMCTX pCtx    = pVCpu->iem.s.CTX_SUFF(pCtx);

    iemCtxCoreToCtx(pCtx, pCtxCore);
    iemInitDecoder(pIemCpu);
    VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
    if (rcStrict == VINF_SUCCESS)
        iemCtxToCtxCore(pCtxCore, pCtx);
    else
        LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
                 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif



/**
 * Interface for HM and EM for executing string I/O OUT (write) instructions.
 *
 * This API ASSUMES that the caller has already verified that the guest code is
 * allowed to access the I/O port.  (The I/O port is in the DX register in the
 * guest state.)
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context per virtual CPU structure.
 * @param   cbValue             The size of the I/O port access (1, 2, or 4).
 * @param   enmAddrMode         The addressing mode.
 * @param   fRepPrefix          Indicates whether a repeat prefix is used
 *                              (doesn't matter which for this instruction).
 * @param   cbInstr             The instruction length in bytes.
 * @param   iEffSeg             The effective segment register of the source
 *                              operand (an X86_SREG_XXX index).
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                                bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
{
    AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
    AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);

    /*
     * State init.
     */
    PIEMCPU pIemCpu = &pVCpu->iem.s;
    iemInitExec(pIemCpu, false /*fBypassHandlers*/);

    /*
     * Switch orgy for getting to the right handler.
     */
    VBOXSTRICTRC rcStrict;
    if (fRepPrefix)
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }
    else
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }

    return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
}
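/*
 * Usage sketch (editor's illustration, not part of the source): an exit
 * handler that has decoded a "rep outsb" with 32-bit addressing, a 2-byte
 * encoding and the default DS source segment might forward it roughly like
 * this.  The concrete values are assumptions for the example only; real
 * callers take them from their own exit/decode information.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                  1,              // cbValue: byte-sized OUTSB
 *                                                  IEMMODE_32BIT,  // enmAddrMode
 *                                                  true,           // fRepPrefix
 *                                                  2,              // cbInstr
 *                                                  X86_SREG_DS);   // iEffSeg
 */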


/**
 * Interface for HM and EM for executing string I/O IN (read) instructions.
 *
 * This API ASSUMES that the caller has already verified that the guest code is
 * allowed to access the I/O port.  (The I/O port is in the DX register in the
 * guest state.)
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context per virtual CPU structure.
 * @param   cbValue             The size of the I/O port access (1, 2, or 4).
 * @param   enmAddrMode         The addressing mode.
 * @param   fRepPrefix          Indicates whether a repeat prefix is used
 *                              (doesn't matter which for this instruction).
 * @param   cbInstr             The instruction length in bytes.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                               bool fRepPrefix, uint8_t cbInstr)
{
    AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);

    /*
     * State init.
     */
    PIEMCPU pIemCpu = &pVCpu->iem.s;
    iemInitExec(pIemCpu, false /*fBypassHandlers*/);

    /*
     * Switch orgy for getting to the right handler.
     */
    VBOXSTRICTRC rcStrict;
    if (fRepPrefix)
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }
    else
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }

    return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
}
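/*
 * Usage sketch (editor's illustration, not part of the source): the read
 * direction mirrors the write case above, minus the segment argument since
 * INS always stores through ES:(E/R)DI.  For instance, a "rep insw" with
 * 16-bit addressing and a 2-byte encoding (assumed values) could be
 * forwarded as:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu,
 *                                                 2,              // cbValue: word-sized INSW
 *                                                 IEMMODE_16BIT,  // enmAddrMode
 *                                                 true,           // fRepPrefix
 *                                                 2);             // cbInstr
 */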