VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 48569

Last change on this file since 48569 was 47819, checked in by vboxsync, 11 years ago

IEM: Read unpatched bytes. Useless (disabled for ages) CSAM hooks.

1/* $Id: IEMAll.cpp 47819 2013-08-16 19:45:01Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
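/* Illustrative only (hypothetical format string): the kind of decode-level
   statement that the level 4 logging described above would emit.
       Log4(("decode %04x:%08RX64: mov eax, ebx\n", pCtx->cs.Sel, pCtx->rip));
*/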
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76//#define IEM_VERIFICATION_MODE_MINIMAL
77//#define IEM_LOG_MEMORY_WRITES
78
79/*******************************************************************************
80* Header Files *
81*******************************************************************************/
82#define LOG_GROUP LOG_GROUP_IEM
83#include <VBox/vmm/iem.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/pdm.h>
86#include <VBox/vmm/pgm.h>
87#include <internal/pgm.h>
88#include <VBox/vmm/iom.h>
89#include <VBox/vmm/em.h>
90#include <VBox/vmm/hm.h>
91#include <VBox/vmm/tm.h>
92#include <VBox/vmm/dbgf.h>
93#include <VBox/vmm/dbgftrace.h>
94#ifdef VBOX_WITH_RAW_MODE_NOT_R0
95# include <VBox/vmm/patm.h>
96# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
97# include <VBox/vmm/csam.h>
98# endif
99#endif
100#include "IEMInternal.h"
101#ifdef IEM_VERIFICATION_MODE_FULL
102# include <VBox/vmm/rem.h>
103# include <VBox/vmm/mm.h>
104#endif
105#include <VBox/vmm/vm.h>
106#include <VBox/log.h>
107#include <VBox/err.h>
108#include <VBox/param.h>
109#include <VBox/dis.h>
110#include <VBox/disopcode.h>
111#include <iprt/assert.h>
112#include <iprt/string.h>
113#include <iprt/x86.h>
114
115
116
117/*******************************************************************************
118* Structures and Typedefs *
119*******************************************************************************/
120/** @typedef PFNIEMOP
121 * Pointer to an opcode decoder function.
122 */
123
124/** @def FNIEMOP_DEF
125 * Define an opcode decoder function.
126 *
127 * We're using macros for this so that adding and removing parameters, as well as
128 * tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL.
129 *
130 * @param a_Name The function name.
131 */
132
133
134#if defined(__GNUC__) && defined(RT_ARCH_X86)
135typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
136# define FNIEMOP_DEF(a_Name) \
137 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
138# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
139 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
140# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
141 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
142
143#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
144typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
145# define FNIEMOP_DEF(a_Name) \
146 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
147# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
148 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
149# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
150 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
151
152#elif defined(__GNUC__)
153typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
154# define FNIEMOP_DEF(a_Name) \
155 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
156# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
157 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
158# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
159 static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
160
161#else
162typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
163# define FNIEMOP_DEF(a_Name) \
164 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
165# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
166 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
167# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
168 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
169
170#endif
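/* Illustrative sketch only (kept as a comment, not compiled): a hypothetical
   decoder named iemOp_example defined via FNIEMOP_DEF. The macro supplies the
   calling convention attributes and the implicit PIEMCPU parameter, so the
   body can use pIemCpu directly:

       FNIEMOP_DEF(iemOp_example)
       {
           NOREF(pIemCpu);
           return VINF_SUCCESS;
       }
*/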
171
172
173/**
174 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
175 */
176typedef union IEMSELDESC
177{
178 /** The legacy view. */
179 X86DESC Legacy;
180 /** The long mode view. */
181 X86DESC64 Long;
182} IEMSELDESC;
183/** Pointer to a selector descriptor table entry. */
184typedef IEMSELDESC *PIEMSELDESC;
185
186
187/*******************************************************************************
188* Defined Constants And Macros *
189*******************************************************************************/
190/** @name IEM status codes.
191 *
192 * Not quite sure how this will play out in the end, just aliasing safe status
193 * codes for now.
194 *
195 * @{ */
196#define VINF_IEM_RAISED_XCPT VINF_EM_RESCHEDULE
197/** @} */
198
199/** Temporary hack to disable the double execution. Will be removed in favor
200 * of a dedicated execution mode in EM. */
201//#define IEM_VERIFICATION_MODE_NO_REM
202
203/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
204 * due to GCC lacking knowledge about the value range of a switch. */
205#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
206
207/**
208 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
209 * occasion.
210 */
211#ifdef LOG_ENABLED
212# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
213 do { \
214 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
216 } while (0)
217#else
218# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
219 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
220#endif
221
222/**
223 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
224 * occasion using the supplied logger statement.
225 *
226 * @param a_LoggerArgs What to log on failure.
227 */
228#ifdef LOG_ENABLED
229# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
230 do { \
231 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
232 /*LogFunc(a_LoggerArgs);*/ \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
234 } while (0)
235#else
236# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
237 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
238#endif
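/* Illustrative use only (hypothetical decoder fragment): bailing out of an
   instruction aspect that is not implemented yet; the condition name is made
   up for this sketch.
       if (fUnhandledCase)
           IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: case not handled\n"));
*/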
239
240/**
241 * Call an opcode decoder function.
242 *
243 * We're using macros for this so that adding and removing parameters can be
244 * done as we please. See FNIEMOP_DEF.
245 */
246#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
247
248/**
249 * Call a common opcode decoder function taking one extra argument.
250 *
251 * We're using macros for this so that adding and removing parameters can be
252 * done as we please. See FNIEMOP_DEF_1.
253 */
254#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
255
256/**
257 * Call a common opcode decoder function taking two extra arguments.
258 *
259 * We're using macros for this so that adding and removing parameters can be
260 * done as we please. See FNIEMOP_DEF_2.
261 */
262#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
263
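/* Illustrative only (both function names are made up): a decoder fetching the
   ModR/M byte and forwarding to a common worker taking one extra argument.
   IEM_OPCODE_GET_NEXT_U8 is defined further down in this file.
       FNIEMOP_DEF(iemOp_example_Eb_Gb)
       {
           uint8_t bRm;
           IEM_OPCODE_GET_NEXT_U8(&bRm);
           return FNIEMOP_CALL_1(iemOpCommonExampleWorker, bRm);
       }
*/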
264/**
265 * Check if we're currently executing in real or virtual 8086 mode.
266 *
267 * @returns @c true if it is, @c false if not.
268 * @param a_pIemCpu The IEM state of the current CPU.
269 */
270#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
271
272/**
273 * Check if we're currently executing in long mode.
274 *
275 * @returns @c true if it is, @c false if not.
276 * @param a_pIemCpu The IEM state of the current CPU.
277 */
278#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
279
280/**
281 * Check if we're currently executing in real mode.
282 *
283 * @returns @c true if it is, @c false if not.
284 * @param a_pIemCpu The IEM state of the current CPU.
285 */
286#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
287
288/**
289 * Tests if an AMD CPUID feature (extended) is marked present - ECX.
290 */
291#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
292
293/**
294 * Tests if an AMD CPUID feature (extended) is marked present - EDX.
295 */
296#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
297
298/**
299 * Tests if at least one of the specified AMD CPUID features (extended) is
300 * marked present.
301 */
302#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
303
304/**
305 * Checks if an Intel CPUID feature is present.
306 */
307#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
308 ( ((a_fEdx) & (X86_CPUID_FEATURE_EDX_TSC | 0)) \
309 || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
310
311/**
312 * Checks if an Intel CPUID feature is present.
313 */
314#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX(a_fEcx) \
315 ( iemRegIsIntelCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx)) )
316
317/**
318 * Checks if an Intel CPUID feature is present in the host CPU.
319 */
320#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
321 ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
322
323/**
324 * Evaluates to true if we're presenting an Intel CPU to the guest.
325 */
326#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
327
328/**
329 * Evaluates to true if we're presenting an AMD CPU to the guest.
330 */
331#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
332
333/**
334 * Check if the address is canonical.
335 */
336#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
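/* Note: the check biases the address by 2^47 so that every canonical address
   (bits 63:48 being copies of bit 47) lands in [0, 2^48); anything else wraps
   around and fails the single unsigned comparison. */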
337
338
339/*******************************************************************************
340* Global Variables *
341*******************************************************************************/
342extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
343
344
345/** Function table for the ADD instruction. */
346static const IEMOPBINSIZES g_iemAImpl_add =
347{
348 iemAImpl_add_u8, iemAImpl_add_u8_locked,
349 iemAImpl_add_u16, iemAImpl_add_u16_locked,
350 iemAImpl_add_u32, iemAImpl_add_u32_locked,
351 iemAImpl_add_u64, iemAImpl_add_u64_locked
352};
353
354/** Function table for the ADC instruction. */
355static const IEMOPBINSIZES g_iemAImpl_adc =
356{
357 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
358 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
359 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
360 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
361};
362
363/** Function table for the SUB instruction. */
364static const IEMOPBINSIZES g_iemAImpl_sub =
365{
366 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
367 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
368 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
369 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
370};
371
372/** Function table for the SBB instruction. */
373static const IEMOPBINSIZES g_iemAImpl_sbb =
374{
375 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
376 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
377 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
378 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
379};
380
381/** Function table for the OR instruction. */
382static const IEMOPBINSIZES g_iemAImpl_or =
383{
384 iemAImpl_or_u8, iemAImpl_or_u8_locked,
385 iemAImpl_or_u16, iemAImpl_or_u16_locked,
386 iemAImpl_or_u32, iemAImpl_or_u32_locked,
387 iemAImpl_or_u64, iemAImpl_or_u64_locked
388};
389
390/** Function table for the XOR instruction. */
391static const IEMOPBINSIZES g_iemAImpl_xor =
392{
393 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
394 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
395 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
396 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
397};
398
399/** Function table for the AND instruction. */
400static const IEMOPBINSIZES g_iemAImpl_and =
401{
402 iemAImpl_and_u8, iemAImpl_and_u8_locked,
403 iemAImpl_and_u16, iemAImpl_and_u16_locked,
404 iemAImpl_and_u32, iemAImpl_and_u32_locked,
405 iemAImpl_and_u64, iemAImpl_and_u64_locked
406};
407
408/** Function table for the CMP instruction.
409 * @remarks Making operand order ASSUMPTIONS.
410 */
411static const IEMOPBINSIZES g_iemAImpl_cmp =
412{
413 iemAImpl_cmp_u8, NULL,
414 iemAImpl_cmp_u16, NULL,
415 iemAImpl_cmp_u32, NULL,
416 iemAImpl_cmp_u64, NULL
417};
418
419/** Function table for the TEST instruction.
420 * @remarks Making operand order ASSUMPTIONS.
421 */
422static const IEMOPBINSIZES g_iemAImpl_test =
423{
424 iemAImpl_test_u8, NULL,
425 iemAImpl_test_u16, NULL,
426 iemAImpl_test_u32, NULL,
427 iemAImpl_test_u64, NULL
428};
429
430/** Function table for the BT instruction. */
431static const IEMOPBINSIZES g_iemAImpl_bt =
432{
433 NULL, NULL,
434 iemAImpl_bt_u16, NULL,
435 iemAImpl_bt_u32, NULL,
436 iemAImpl_bt_u64, NULL
437};
438
439/** Function table for the BTC instruction. */
440static const IEMOPBINSIZES g_iemAImpl_btc =
441{
442 NULL, NULL,
443 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
444 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
445 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
446};
447
448/** Function table for the BTR instruction. */
449static const IEMOPBINSIZES g_iemAImpl_btr =
450{
451 NULL, NULL,
452 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
453 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
454 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
455};
456
457/** Function table for the BTS instruction. */
458static const IEMOPBINSIZES g_iemAImpl_bts =
459{
460 NULL, NULL,
461 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
462 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
463 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
464};
465
466/** Function table for the BSF instruction. */
467static const IEMOPBINSIZES g_iemAImpl_bsf =
468{
469 NULL, NULL,
470 iemAImpl_bsf_u16, NULL,
471 iemAImpl_bsf_u32, NULL,
472 iemAImpl_bsf_u64, NULL
473};
474
475/** Function table for the BSR instruction. */
476static const IEMOPBINSIZES g_iemAImpl_bsr =
477{
478 NULL, NULL,
479 iemAImpl_bsr_u16, NULL,
480 iemAImpl_bsr_u32, NULL,
481 iemAImpl_bsr_u64, NULL
482};
483
484/** Function table for the IMUL instruction. */
485static const IEMOPBINSIZES g_iemAImpl_imul_two =
486{
487 NULL, NULL,
488 iemAImpl_imul_two_u16, NULL,
489 iemAImpl_imul_two_u32, NULL,
490 iemAImpl_imul_two_u64, NULL
491};
492
493/** Group 1 /r lookup table. */
494static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
495{
496 &g_iemAImpl_add,
497 &g_iemAImpl_or,
498 &g_iemAImpl_adc,
499 &g_iemAImpl_sbb,
500 &g_iemAImpl_and,
501 &g_iemAImpl_sub,
502 &g_iemAImpl_xor,
503 &g_iemAImpl_cmp
504};
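/* Note: the table order follows the ModR/M reg field of the immediate group 1
   opcodes (0x80..0x83): /0=ADD, /1=OR, /2=ADC, /3=SBB, /4=AND, /5=SUB, /6=XOR,
   /7=CMP. */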
505
506/** Function table for the INC instruction. */
507static const IEMOPUNARYSIZES g_iemAImpl_inc =
508{
509 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
510 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
511 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
512 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
513};
514
515/** Function table for the DEC instruction. */
516static const IEMOPUNARYSIZES g_iemAImpl_dec =
517{
518 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
519 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
520 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
521 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
522};
523
524/** Function table for the NEG instruction. */
525static const IEMOPUNARYSIZES g_iemAImpl_neg =
526{
527 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
528 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
529 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
530 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
531};
532
533/** Function table for the NOT instruction. */
534static const IEMOPUNARYSIZES g_iemAImpl_not =
535{
536 iemAImpl_not_u8, iemAImpl_not_u8_locked,
537 iemAImpl_not_u16, iemAImpl_not_u16_locked,
538 iemAImpl_not_u32, iemAImpl_not_u32_locked,
539 iemAImpl_not_u64, iemAImpl_not_u64_locked
540};
541
542
543/** Function table for the ROL instruction. */
544static const IEMOPSHIFTSIZES g_iemAImpl_rol =
545{
546 iemAImpl_rol_u8,
547 iemAImpl_rol_u16,
548 iemAImpl_rol_u32,
549 iemAImpl_rol_u64
550};
551
552/** Function table for the ROR instruction. */
553static const IEMOPSHIFTSIZES g_iemAImpl_ror =
554{
555 iemAImpl_ror_u8,
556 iemAImpl_ror_u16,
557 iemAImpl_ror_u32,
558 iemAImpl_ror_u64
559};
560
561/** Function table for the RCL instruction. */
562static const IEMOPSHIFTSIZES g_iemAImpl_rcl =
563{
564 iemAImpl_rcl_u8,
565 iemAImpl_rcl_u16,
566 iemAImpl_rcl_u32,
567 iemAImpl_rcl_u64
568};
569
570/** Function table for the RCR instruction. */
571static const IEMOPSHIFTSIZES g_iemAImpl_rcr =
572{
573 iemAImpl_rcr_u8,
574 iemAImpl_rcr_u16,
575 iemAImpl_rcr_u32,
576 iemAImpl_rcr_u64
577};
578
579/** Function table for the SHL instruction. */
580static const IEMOPSHIFTSIZES g_iemAImpl_shl =
581{
582 iemAImpl_shl_u8,
583 iemAImpl_shl_u16,
584 iemAImpl_shl_u32,
585 iemAImpl_shl_u64
586};
587
588/** Function table for the SHR instruction. */
589static const IEMOPSHIFTSIZES g_iemAImpl_shr =
590{
591 iemAImpl_shr_u8,
592 iemAImpl_shr_u16,
593 iemAImpl_shr_u32,
594 iemAImpl_shr_u64
595};
596
597/** Function table for the SAR instruction. */
598static const IEMOPSHIFTSIZES g_iemAImpl_sar =
599{
600 iemAImpl_sar_u8,
601 iemAImpl_sar_u16,
602 iemAImpl_sar_u32,
603 iemAImpl_sar_u64
604};
605
606
607/** Function table for the MUL instruction. */
608static const IEMOPMULDIVSIZES g_iemAImpl_mul =
609{
610 iemAImpl_mul_u8,
611 iemAImpl_mul_u16,
612 iemAImpl_mul_u32,
613 iemAImpl_mul_u64
614};
615
616/** Function table for the IMUL instruction working implicitly on rAX. */
617static const IEMOPMULDIVSIZES g_iemAImpl_imul =
618{
619 iemAImpl_imul_u8,
620 iemAImpl_imul_u16,
621 iemAImpl_imul_u32,
622 iemAImpl_imul_u64
623};
624
625/** Function table for the DIV instruction. */
626static const IEMOPMULDIVSIZES g_iemAImpl_div =
627{
628 iemAImpl_div_u8,
629 iemAImpl_div_u16,
630 iemAImpl_div_u32,
631 iemAImpl_div_u64
632};
633
634/** Function table for the IDIV instruction. */
635static const IEMOPMULDIVSIZES g_iemAImpl_idiv =
636{
637 iemAImpl_idiv_u8,
638 iemAImpl_idiv_u16,
639 iemAImpl_idiv_u32,
640 iemAImpl_idiv_u64
641};
642
643/** Function table for the SHLD instruction */
644static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
645{
646 iemAImpl_shld_u16,
647 iemAImpl_shld_u32,
648 iemAImpl_shld_u64,
649};
650
651/** Function table for the SHRD instruction */
652static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
653{
654 iemAImpl_shrd_u16,
655 iemAImpl_shrd_u32,
656 iemAImpl_shrd_u64,
657};
658
659
660/** Function table for the PUNPCKLBW instruction */
661static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
662/** Function table for the PUNPCKLWD instruction */
663static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
664/** Function table for the PUNPCKLDQ instruction */
665static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
666/** Function table for the PUNPCKLQDQ instruction */
667static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
668
669/** Function table for the PUNPCKHBW instruction */
670static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
671/** Function table for the PUNPCKHWD instruction */
672static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
673/** Function table for the PUNPCKHDQ instruction */
674static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
675/** Function table for the PUNPCKHQDQ instruction */
676static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
677
678/** Function table for the PXOR instruction */
679static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
680/** Function table for the PCMPEQB instruction */
681static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
682/** Function table for the PCMPEQW instruction */
683static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
684/** Function table for the PCMPEQD instruction */
685static const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
686
687
688#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
689/** What IEM just wrote. */
690uint8_t g_abIemWrote[256];
691/** How much IEM just wrote. */
692size_t g_cbIemWrote;
693#endif
694
695
696/*******************************************************************************
697* Internal Functions *
698*******************************************************************************/
699static VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
700static VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
701static VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
702/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
703static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
704static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
705static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
706static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
707static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
708static VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
709static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
710static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
711static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
712static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
713static VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
714static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
715static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
716static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
717static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
718static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
719static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
720static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
721static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
722static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
723static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
724static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
725static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
726static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
727
728#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
729static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
730#endif
731static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
732static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
733
734
735
736/**
737 * Sets the pass up status.
738 *
739 * @returns VINF_SUCCESS.
740 * @param pIemCpu The per CPU IEM state of the calling thread.
741 * @param rcPassUp The pass up status. Must be informational.
742 * VINF_SUCCESS is not allowed.
743 */
744static int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
745{
746 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
747
748 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
749 if (rcOldPassUp == VINF_SUCCESS)
750 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
751 /* If both are EM scheduling codes, use EM priority rules. */
752 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
753 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
754 {
755 if (rcPassUp < rcOldPassUp)
756 {
757 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
758 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
759 }
760 else
761 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
762 }
763 /* Override EM scheduling with specific status code. */
764 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
765 {
766 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
767 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
768 }
769 /* Don't override specific status code, first come first served. */
770 else
771 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
772 return VINF_SUCCESS;
773}
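/* Illustrative call site only (sketch, not taken from this file): recording an
   informational status for the caller while letting the current instruction
   finish normally.
       rcStrict = iemSetPassUpStatus(pIemCpu, VINF_EM_RESCHEDULE);
*/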
774
775
776/**
777 * Initializes the execution state.
778 *
779 * @param pIemCpu The per CPU IEM state.
780 * @param fBypassHandlers Whether to bypass access handlers.
781 */
782DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
783{
784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
785 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
786
787#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
788 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
789 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
790 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
791 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
792 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
793 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
794 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
795 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
796#endif
797
798#ifdef VBOX_WITH_RAW_MODE_NOT_R0
799 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
800#endif
801 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
802 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
803 ? IEMMODE_64BIT
804 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
805 ? IEMMODE_32BIT
806 : IEMMODE_16BIT;
807 pIemCpu->enmCpuMode = enmMode;
808#ifdef VBOX_STRICT
809 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
810 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
811 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
812 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
813 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
814 pIemCpu->uRexReg = 127;
815 pIemCpu->uRexB = 127;
816 pIemCpu->uRexIndex = 127;
817 pIemCpu->iEffSeg = 127;
818 pIemCpu->offOpcode = 127;
819 pIemCpu->cbOpcode = 127;
820#endif
821
822 pIemCpu->cActiveMappings = 0;
823 pIemCpu->iNextMapping = 0;
824 pIemCpu->rcPassUp = VINF_SUCCESS;
825 pIemCpu->fBypassHandlers = fBypassHandlers;
826#ifdef VBOX_WITH_RAW_MODE_NOT_R0
827 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
828 && pCtx->cs.u64Base == 0
829 && pCtx->cs.u32Limit == UINT32_MAX
830 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
831 if (!pIemCpu->fInPatchCode)
832 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
833#endif
834}
835
836
837/**
838 * Initializes the decoder state.
839 *
840 * @param pIemCpu The per CPU IEM state.
841 * @param fBypassHandlers Whether to bypass access handlers.
842 */
843DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
844{
845 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
846 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
847
848#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
851 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
854 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
855 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
856 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
857#endif
858
859#ifdef VBOX_WITH_RAW_MODE_NOT_R0
860 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
861#endif
862 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
863#ifdef IEM_VERIFICATION_MODE_FULL
864 if (pIemCpu->uInjectCpl != UINT8_MAX)
865 pIemCpu->uCpl = pIemCpu->uInjectCpl;
866#endif
867 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
868 ? IEMMODE_64BIT
869 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
870 ? IEMMODE_32BIT
871 : IEMMODE_16BIT;
872 pIemCpu->enmCpuMode = enmMode;
873 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
874 pIemCpu->enmEffAddrMode = enmMode;
875 if (enmMode != IEMMODE_64BIT)
876 {
877 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
878 pIemCpu->enmEffOpSize = enmMode;
879 }
880 else
881 {
882 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
883 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
884 }
885 pIemCpu->fPrefixes = 0;
886 pIemCpu->uRexReg = 0;
887 pIemCpu->uRexB = 0;
888 pIemCpu->uRexIndex = 0;
889 pIemCpu->iEffSeg = X86_SREG_DS;
890 pIemCpu->offOpcode = 0;
891 pIemCpu->cbOpcode = 0;
892 pIemCpu->cActiveMappings = 0;
893 pIemCpu->iNextMapping = 0;
894 pIemCpu->rcPassUp = VINF_SUCCESS;
895 pIemCpu->fBypassHandlers = fBypassHandlers;
896#ifdef VBOX_WITH_RAW_MODE_NOT_R0
897 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
898 && pCtx->cs.u64Base == 0
899 && pCtx->cs.u32Limit == UINT32_MAX
900 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
901 if (!pIemCpu->fInPatchCode)
902 CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
903#endif
904
905#ifdef DBGFTRACE_ENABLED
906 switch (enmMode)
907 {
908 case IEMMODE_64BIT:
909 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
910 break;
911 case IEMMODE_32BIT:
912 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
913 break;
914 case IEMMODE_16BIT:
915 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
916 break;
917 }
918#endif
919}
920
921
922/**
923 * Prefetches opcodes the first time execution is started.
924 *
925 * @returns Strict VBox status code.
926 * @param pIemCpu The IEM state.
927 * @param fBypassHandlers Whether to bypass access handlers.
928 */
929static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
930{
931#ifdef IEM_VERIFICATION_MODE_FULL
932 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
933#endif
934 iemInitDecoder(pIemCpu, fBypassHandlers);
935
936 /*
937 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
938 *
939 * First translate CS:rIP to a physical address.
940 */
941 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
942 uint32_t cbToTryRead;
943 RTGCPTR GCPtrPC;
944 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
945 {
946 cbToTryRead = PAGE_SIZE;
947 GCPtrPC = pCtx->rip;
948 if (!IEM_IS_CANONICAL(GCPtrPC))
949 return iemRaiseGeneralProtectionFault0(pIemCpu);
950 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
951 }
952 else
953 {
954 uint32_t GCPtrPC32 = pCtx->eip;
955 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
956 if (GCPtrPC32 > pCtx->cs.u32Limit)
957 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
958 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
959 if (!cbToTryRead) /* overflowed */
960 {
961 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
962 cbToTryRead = UINT32_MAX;
963 }
964 GCPtrPC = pCtx->cs.u64Base + GCPtrPC32;
965 }
966
967#ifdef VBOX_WITH_RAW_MODE_NOT_R0
968 /* Allow interpretation of patch manager code blocks since they can for
969 instance throw #PFs for perfectly good reasons. */
970 if (pIemCpu->fInPatchCode)
971 {
972 size_t cbRead = 0;
973 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
974 AssertRCReturn(rc, rc);
975 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
976 return VINF_SUCCESS;
977 }
978#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
979
980 RTGCPHYS GCPhys;
981 uint64_t fFlags;
982 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
983 if (RT_FAILURE(rc))
984 {
985 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
986 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
987 }
988 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
989 {
990 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
991 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
992 }
993 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
994 {
995 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
996 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
997 }
998 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
999 /** @todo Check reserved bits and such stuff. PGM is better at doing
1000 * that, so do it when implementing the guest virtual address
1001 * TLB... */
1002
1003#ifdef IEM_VERIFICATION_MODE_FULL
1004 /*
1005 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1006 * instruction.
1007 */
1008 /** @todo optimize this differently by not using PGMPhysRead. */
1009 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1010 pIemCpu->GCPhysOpcodes = GCPhys;
1011 if ( offPrevOpcodes < cbOldOpcodes
1012 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1013 {
1014 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1015 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1016 pIemCpu->cbOpcode = cbNew;
1017 return VINF_SUCCESS;
1018 }
1019#endif
1020
1021 /*
1022 * Read the bytes at this address.
1023 */
1024 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1025#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1026 size_t cbActual;
1027 if ( PATMIsEnabled(pVM)
1028 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1029 {
1030 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1031 Assert(cbActual > 0);
1032 pIemCpu->cbOpcode = (uint8_t)cbActual;
1033 }
1034 else
1035#endif
1036 {
1037 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1038 if (cbToTryRead > cbLeftOnPage)
1039 cbToTryRead = cbLeftOnPage;
1040 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1041 cbToTryRead = sizeof(pIemCpu->abOpcode);
1042
1043 if (!pIemCpu->fBypassHandlers)
1044 rc = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead);
1045 else
1046 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1047 if (rc != VINF_SUCCESS)
1048 {
1049 /** @todo status code handling */
1050 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1051 GCPtrPC, GCPhys, rc, cbToTryRead));
1052 return rc;
1053 }
1054 pIemCpu->cbOpcode = cbToTryRead;
1055 }
1056
1057 return VINF_SUCCESS;
1058}
1059
1060
1061/**
1062 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1063 * exception if it fails.
1064 *
1065 * @returns Strict VBox status code.
1066 * @param pIemCpu The IEM state.
1067 * @param cbMin The minimum number of bytes relative to offOpcode
1068 * that must be read.
1069 */
1070static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1071{
1072 /*
1073 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1074 *
1075 * First translate CS:rIP to a physical address.
1076 */
1077 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1078 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1079 uint32_t cbToTryRead;
1080 RTGCPTR GCPtrNext;
1081 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1082 {
1083 cbToTryRead = PAGE_SIZE;
1084 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1085 if (!IEM_IS_CANONICAL(GCPtrNext))
1086 return iemRaiseGeneralProtectionFault0(pIemCpu);
1087 }
1088 else
1089 {
1090 uint32_t GCPtrNext32 = pCtx->eip;
1091 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1092 GCPtrNext32 += pIemCpu->cbOpcode;
1093 if (GCPtrNext32 > pCtx->cs.u32Limit)
1094 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1095 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1096 if (!cbToTryRead) /* overflowed */
1097 {
1098 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1099 cbToTryRead = UINT32_MAX;
1100 /** @todo check out wrapping around the code segment. */
1101 }
1102 if (cbToTryRead < cbMin - cbLeft)
1103 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1104 GCPtrNext = pCtx->cs.u64Base + GCPtrNext32;
1105 }
1106
1107 /* Only read up to the end of the page, and make sure we don't read more
1108 than the opcode buffer can hold. */
1109 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1110 if (cbToTryRead > cbLeftOnPage)
1111 cbToTryRead = cbLeftOnPage;
1112 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1113 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1114 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1115
1116#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1117 /* Allow interpretation of patch manager code blocks since they can for
1118 instance throw #PFs for perfectly good reasons. */
1119 if (pIemCpu->fInPatchCode)
1120 {
1121 size_t cbRead = 0;
1122 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1123 AssertRCReturn(rc, rc);
1124 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1125 return VINF_SUCCESS;
1126 }
1127#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1128
1129 RTGCPHYS GCPhys;
1130 uint64_t fFlags;
1131 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1132 if (RT_FAILURE(rc))
1133 {
1134 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1135 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1136 }
1137 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1138 {
1139 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1140 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1141 }
1142 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1143 {
1144 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1145 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1146 }
1147 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1148 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1149 /** @todo Check reserved bits and such stuff. PGM is better at doing
1150 * that, so do it when implementing the guest virtual address
1151 * TLB... */
1152
1153 /*
1154 * Read the bytes at this address.
1155 *
1156 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1157 * and since PATM should only patch the start of an instruction there
1158 * should be no need to check again here.
1159 */
1160 if (!pIemCpu->fBypassHandlers)
1161 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
1162 else
1163 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1164 if (rc != VINF_SUCCESS)
1165 {
1166 /** @todo status code handling */
1167 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1168 return rc;
1169 }
1170 pIemCpu->cbOpcode += cbToTryRead;
1171 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1172
1173 return VINF_SUCCESS;
1174}
1175
1176
1177/**
1178 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1179 *
1180 * @returns Strict VBox status code.
1181 * @param pIemCpu The IEM state.
1182 * @param pb Where to return the opcode byte.
1183 */
1184DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1185{
1186 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1187 if (rcStrict == VINF_SUCCESS)
1188 {
1189 uint8_t offOpcode = pIemCpu->offOpcode;
1190 *pb = pIemCpu->abOpcode[offOpcode];
1191 pIemCpu->offOpcode = offOpcode + 1;
1192 }
1193 else
1194 *pb = 0;
1195 return rcStrict;
1196}
1197
1198
1199/**
1200 * Fetches the next opcode byte.
1201 *
1202 * @returns Strict VBox status code.
1203 * @param pIemCpu The IEM state.
1204 * @param pu8 Where to return the opcode byte.
1205 */
1206DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1207{
1208 uint8_t const offOpcode = pIemCpu->offOpcode;
1209 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1210 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1211
1212 *pu8 = pIemCpu->abOpcode[offOpcode];
1213 pIemCpu->offOpcode = offOpcode + 1;
1214 return VINF_SUCCESS;
1215}
1216
1217
1218/**
1219 * Fetches the next opcode byte, returns automatically on failure.
1220 *
1221 * @param a_pu8 Where to return the opcode byte.
1222 * @remark Implicitly references pIemCpu.
1223 */
1224#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1225 do \
1226 { \
1227 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1228 if (rcStrict2 != VINF_SUCCESS) \
1229 return rcStrict2; \
1230 } while (0)
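/* Illustrative only (hypothetical decoder fragment): fetching an 8-bit
   immediate; on any fetch error the macro returns from the enclosing function.
       uint8_t u8Imm;
       IEM_OPCODE_GET_NEXT_U8(&u8Imm);
*/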
1231
1232
1233/**
1234 * Fetches the next signed byte from the opcode stream.
1235 *
1236 * @returns Strict VBox status code.
1237 * @param pIemCpu The IEM state.
1238 * @param pi8 Where to return the signed byte.
1239 */
1240DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1241{
1242 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1243}
1244
1245
1246/**
1247 * Fetches the next signed byte from the opcode stream, returning automatically
1248 * on failure.
1249 *
1250 * @param pi8 Where to return the signed byte.
1251 * @remark Implicitly references pIemCpu.
1252 */
1253#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1254 do \
1255 { \
1256 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1257 if (rcStrict2 != VINF_SUCCESS) \
1258 return rcStrict2; \
1259 } while (0)
1260
1261
1262/**
1263 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1264 *
1265 * @returns Strict VBox status code.
1266 * @param pIemCpu The IEM state.
1267 * @param pu16 Where to return the opcode word.
1268 */
1269DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1270{
1271 uint8_t u8;
1272 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1273 if (rcStrict == VINF_SUCCESS)
1274 *pu16 = (int8_t)u8;
1275 return rcStrict;
1276}
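/* Note: the (int8_t) cast above sign-extends the fetched byte when it is
   promoted and assigned to the wider unsigned destination; the other S8Sx
   fetchers below rely on the same idiom. */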
1277
1278
1279/**
1280 * Fetches the next signed byte from the opcode stream, extending it to
1281 * unsigned 16-bit.
1282 *
1283 * @returns Strict VBox status code.
1284 * @param pIemCpu The IEM state.
1285 * @param pu16 Where to return the unsigned word.
1286 */
1287DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1288{
1289 uint8_t const offOpcode = pIemCpu->offOpcode;
1290 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1291 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1292
1293 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1294 pIemCpu->offOpcode = offOpcode + 1;
1295 return VINF_SUCCESS;
1296}
1297
1298
1299/**
1300 * Fetches the next signed byte from the opcode stream, sign-extending it to
1301 * a word, and returns automatically on failure.
1302 *
1303 * @param pu16 Where to return the word.
1304 * @remark Implicitly references pIemCpu.
1305 */
1306#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1307 do \
1308 { \
1309 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1310 if (rcStrict2 != VINF_SUCCESS) \
1311 return rcStrict2; \
1312 } while (0)
1313
1314
1315/**
1316 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1317 *
1318 * @returns Strict VBox status code.
1319 * @param pIemCpu The IEM state.
1320 * @param pu32 Where to return the opcode dword.
1321 */
1322DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1323{
1324 uint8_t u8;
1325 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1326 if (rcStrict == VINF_SUCCESS)
1327 *pu32 = (int8_t)u8;
1328 return rcStrict;
1329}
1330
1331
1332/**
1333 * Fetches the next signed byte from the opcode stream, extending it to
1334 * unsigned 32-bit.
1335 *
1336 * @returns Strict VBox status code.
1337 * @param pIemCpu The IEM state.
1338 * @param pu32 Where to return the unsigned dword.
1339 */
1340DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1341{
1342 uint8_t const offOpcode = pIemCpu->offOpcode;
1343 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1344 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1345
1346 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1347 pIemCpu->offOpcode = offOpcode + 1;
1348 return VINF_SUCCESS;
1349}
1350
1351
1352/**
1353 * Fetches the next signed byte from the opcode stream, sign-extending it to
1354 * a double word, and returns automatically on failure.
1355 *
1356 * @param pu32 Where to return the double word.
1357 * @remark Implicitly references pIemCpu.
1358 */
1359#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1360 do \
1361 { \
1362 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1363 if (rcStrict2 != VINF_SUCCESS) \
1364 return rcStrict2; \
1365 } while (0)
1366
1367
1368/**
1369 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1370 *
1371 * @returns Strict VBox status code.
1372 * @param pIemCpu The IEM state.
1373 * @param pu64 Where to return the opcode qword.
1374 */
1375DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1376{
1377 uint8_t u8;
1378 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1379 if (rcStrict == VINF_SUCCESS)
1380 *pu64 = (int8_t)u8;
1381 return rcStrict;
1382}
1383
1384
1385/**
1386 * Fetches the next signed byte from the opcode stream, extending it to
1387 * unsigned 64-bit.
1388 *
1389 * @returns Strict VBox status code.
1390 * @param pIemCpu The IEM state.
1391 * @param pu64 Where to return the unsigned qword.
1392 */
1393DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1394{
1395 uint8_t const offOpcode = pIemCpu->offOpcode;
1396 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1397 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1398
1399 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1400 pIemCpu->offOpcode = offOpcode + 1;
1401 return VINF_SUCCESS;
1402}
1403
1404
1405/**
1406 * Fetches the next signed byte from the opcode stream, sign-extending it to
1407 * a quad word, and returns automatically on failure.
1408 *
1409 * @param pu64 Where to return the quad word.
1410 * @remark Implicitly references pIemCpu.
1411 */
1412#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1413 do \
1414 { \
1415 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1416 if (rcStrict2 != VINF_SUCCESS) \
1417 return rcStrict2; \
1418 } while (0)
1419
1420
1421/**
1422 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1423 *
1424 * @returns Strict VBox status code.
1425 * @param pIemCpu The IEM state.
1426 * @param pu16 Where to return the opcode word.
1427 */
1428DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1429{
1430 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1431 if (rcStrict == VINF_SUCCESS)
1432 {
1433 uint8_t offOpcode = pIemCpu->offOpcode;
1434 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1435 pIemCpu->offOpcode = offOpcode + 2;
1436 }
1437 else
1438 *pu16 = 0;
1439 return rcStrict;
1440}
1441
1442
1443/**
1444 * Fetches the next opcode word.
1445 *
1446 * @returns Strict VBox status code.
1447 * @param pIemCpu The IEM state.
1448 * @param pu16 Where to return the opcode word.
1449 */
1450DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1451{
1452 uint8_t const offOpcode = pIemCpu->offOpcode;
1453 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1454 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1455
1456 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1457 pIemCpu->offOpcode = offOpcode + 2;
1458 return VINF_SUCCESS;
1459}
1460
1461
1462/**
1463 * Fetches the next opcode word, returns automatically on failure.
1464 *
1465 * @param a_pu16 Where to return the opcode word.
1466 * @remark Implicitly references pIemCpu.
1467 */
1468#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1469 do \
1470 { \
1471 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1472 if (rcStrict2 != VINF_SUCCESS) \
1473 return rcStrict2; \
1474 } while (0)
1475
1476
1477/**
1478 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1479 *
1480 * @returns Strict VBox status code.
1481 * @param pIemCpu The IEM state.
1482 * @param pu32 Where to return the opcode double word.
1483 */
1484DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1485{
1486 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1487 if (rcStrict == VINF_SUCCESS)
1488 {
1489 uint8_t offOpcode = pIemCpu->offOpcode;
1490 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1491 pIemCpu->offOpcode = offOpcode + 2;
1492 }
1493 else
1494 *pu32 = 0;
1495 return rcStrict;
1496}
1497
1498
1499/**
1500 * Fetches the next opcode word, zero extending it to a double word.
1501 *
1502 * @returns Strict VBox status code.
1503 * @param pIemCpu The IEM state.
1504 * @param pu32 Where to return the opcode double word.
1505 */
1506DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1507{
1508 uint8_t const offOpcode = pIemCpu->offOpcode;
1509 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1510 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1511
1512 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1513 pIemCpu->offOpcode = offOpcode + 2;
1514 return VINF_SUCCESS;
1515}
1516
1517
1518/**
1519 * Fetches the next opcode word and zero extends it to a double word, returns
1520 * automatically on failure.
1521 *
1522 * @param a_pu32 Where to return the opcode double word.
1523 * @remark Implicitly references pIemCpu.
1524 */
1525#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1526 do \
1527 { \
1528 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1529 if (rcStrict2 != VINF_SUCCESS) \
1530 return rcStrict2; \
1531 } while (0)
1532
1533
1534/**
1535 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1536 *
1537 * @returns Strict VBox status code.
1538 * @param pIemCpu The IEM state.
1539 * @param pu64 Where to return the opcode quad word.
1540 */
1541DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1542{
1543 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1544 if (rcStrict == VINF_SUCCESS)
1545 {
1546 uint8_t offOpcode = pIemCpu->offOpcode;
1547 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1548 pIemCpu->offOpcode = offOpcode + 2;
1549 }
1550 else
1551 *pu64 = 0;
1552 return rcStrict;
1553}
1554
1555
1556/**
1557 * Fetches the next opcode word, zero extending it to a quad word.
1558 *
1559 * @returns Strict VBox status code.
1560 * @param pIemCpu The IEM state.
1561 * @param pu64 Where to return the opcode quad word.
1562 */
1563DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1564{
1565 uint8_t const offOpcode = pIemCpu->offOpcode;
1566 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1567 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1568
1569 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1570 pIemCpu->offOpcode = offOpcode + 2;
1571 return VINF_SUCCESS;
1572}
1573
1574
1575/**
1576 * Fetches the next opcode word and zero extends it to a quad word, returns
1577 * automatically on failure.
1578 *
1579 * @param a_pu64 Where to return the opcode quad word.
1580 * @remark Implicitly references pIemCpu.
1581 */
1582#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1583 do \
1584 { \
1585 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1586 if (rcStrict2 != VINF_SUCCESS) \
1587 return rcStrict2; \
1588 } while (0)
1589
1590
1591/**
1592 * Fetches the next signed word from the opcode stream.
1593 *
1594 * @returns Strict VBox status code.
1595 * @param pIemCpu The IEM state.
1596 * @param pi16 Where to return the signed word.
1597 */
1598DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1599{
1600 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1601}
1602
1603
1604/**
1605 * Fetches the next signed word from the opcode stream, returning automatically
1606 * on failure.
1607 *
1608 * @param a_pi16 Where to return the signed word.
1609 * @remark Implicitly references pIemCpu.
1610 */
1611#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1612 do \
1613 { \
1614 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1615 if (rcStrict2 != VINF_SUCCESS) \
1616 return rcStrict2; \
1617 } while (0)
1618
1619
1620/**
1621 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1622 *
1623 * @returns Strict VBox status code.
1624 * @param pIemCpu The IEM state.
1625 * @param pu32 Where to return the opcode dword.
1626 */
1627DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1628{
1629 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1630 if (rcStrict == VINF_SUCCESS)
1631 {
1632 uint8_t offOpcode = pIemCpu->offOpcode;
1633 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1634 pIemCpu->abOpcode[offOpcode + 1],
1635 pIemCpu->abOpcode[offOpcode + 2],
1636 pIemCpu->abOpcode[offOpcode + 3]);
1637 pIemCpu->offOpcode = offOpcode + 4;
1638 }
1639 else
1640 *pu32 = 0;
1641 return rcStrict;
1642}
1643
1644
1645/**
1646 * Fetches the next opcode dword.
1647 *
1648 * @returns Strict VBox status code.
1649 * @param pIemCpu The IEM state.
1650 * @param pu32 Where to return the opcode double word.
1651 */
1652DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1653{
1654 uint8_t const offOpcode = pIemCpu->offOpcode;
1655 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1656 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1657
1658 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1659 pIemCpu->abOpcode[offOpcode + 1],
1660 pIemCpu->abOpcode[offOpcode + 2],
1661 pIemCpu->abOpcode[offOpcode + 3]);
1662 pIemCpu->offOpcode = offOpcode + 4;
1663 return VINF_SUCCESS;
1664}
1665
1666
1667/**
1668 * Fetches the next opcode dword, returns automatically on failure.
1669 *
1670 * @param a_pu32 Where to return the opcode dword.
1671 * @remark Implicitly references pIemCpu.
1672 */
1673#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1674 do \
1675 { \
1676 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1677 if (rcStrict2 != VINF_SUCCESS) \
1678 return rcStrict2; \
1679 } while (0)
1680
1681
1682/**
1683 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1684 *
1685 * @returns Strict VBox status code.
1686 * @param pIemCpu The IEM state.
1687 * @param pu64 Where to return the opcode quad word.
1688 */
1689DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1690{
1691 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1692 if (rcStrict == VINF_SUCCESS)
1693 {
1694 uint8_t offOpcode = pIemCpu->offOpcode;
1695 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1696 pIemCpu->abOpcode[offOpcode + 1],
1697 pIemCpu->abOpcode[offOpcode + 2],
1698 pIemCpu->abOpcode[offOpcode + 3]);
1699 pIemCpu->offOpcode = offOpcode + 4;
1700 }
1701 else
1702 *pu64 = 0;
1703 return rcStrict;
1704}
1705
1706
1707/**
1708 * Fetches the next opcode dword, zero extending it to a quad word.
1709 *
1710 * @returns Strict VBox status code.
1711 * @param pIemCpu The IEM state.
1712 * @param pu64 Where to return the opcode quad word.
1713 */
1714DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1715{
1716 uint8_t const offOpcode = pIemCpu->offOpcode;
1717 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1718 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1719
1720 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1721 pIemCpu->abOpcode[offOpcode + 1],
1722 pIemCpu->abOpcode[offOpcode + 2],
1723 pIemCpu->abOpcode[offOpcode + 3]);
1724 pIemCpu->offOpcode = offOpcode + 4;
1725 return VINF_SUCCESS;
1726}
1727
1728
1729/**
1730 * Fetches the next opcode dword and zero extends it to a quad word, returns
1731 * automatically on failure.
1732 *
1733 * @param a_pu64 Where to return the opcode quad word.
1734 * @remark Implicitly references pIemCpu.
1735 */
1736#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1737 do \
1738 { \
1739 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1740 if (rcStrict2 != VINF_SUCCESS) \
1741 return rcStrict2; \
1742 } while (0)
1743
1744
1745/**
1746 * Fetches the next signed double word from the opcode stream.
1747 *
1748 * @returns Strict VBox status code.
1749 * @param pIemCpu The IEM state.
1750 * @param pi32 Where to return the signed double word.
1751 */
1752DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1753{
1754 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1755}
1756
1757/**
1758 * Fetches the next signed double word from the opcode stream, returning
1759 * automatically on failure.
1760 *
1761 * @param a_pi32 Where to return the signed double word.
1762 * @remark Implicitly references pIemCpu.
1763 */
1764#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1765 do \
1766 { \
1767 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1768 if (rcStrict2 != VINF_SUCCESS) \
1769 return rcStrict2; \
1770 } while (0)
1771
1772
1773/**
1774 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1775 *
1776 * @returns Strict VBox status code.
1777 * @param pIemCpu The IEM state.
1778 * @param pu64 Where to return the opcode qword.
1779 */
1780DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1781{
1782 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1783 if (rcStrict == VINF_SUCCESS)
1784 {
1785 uint8_t offOpcode = pIemCpu->offOpcode;
1786 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1787 pIemCpu->abOpcode[offOpcode + 1],
1788 pIemCpu->abOpcode[offOpcode + 2],
1789 pIemCpu->abOpcode[offOpcode + 3]);
1790 pIemCpu->offOpcode = offOpcode + 4;
1791 }
1792 else
1793 *pu64 = 0;
1794 return rcStrict;
1795}
1796
1797
1798/**
1799 * Fetches the next opcode dword, sign extending it into a quad word.
1800 *
1801 * @returns Strict VBox status code.
1802 * @param pIemCpu The IEM state.
1803 * @param pu64 Where to return the opcode quad word.
1804 */
1805DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1806{
1807 uint8_t const offOpcode = pIemCpu->offOpcode;
1808 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1809 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1810
1811 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1812 pIemCpu->abOpcode[offOpcode + 1],
1813 pIemCpu->abOpcode[offOpcode + 2],
1814 pIemCpu->abOpcode[offOpcode + 3]);
1815 *pu64 = i32;
1816 pIemCpu->offOpcode = offOpcode + 4;
1817 return VINF_SUCCESS;
1818}
1819
1820
1821/**
1822 * Fetches the next opcode double word and sign extends it to a quad word,
1823 * returns automatically on failure.
1824 *
1825 * @param a_pu64 Where to return the opcode quad word.
1826 * @remark Implicitly references pIemCpu.
1827 */
1828#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1829 do \
1830 { \
1831 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1832 if (rcStrict2 != VINF_SUCCESS) \
1833 return rcStrict2; \
1834 } while (0)
1835
1836
1837/**
1838 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1839 *
1840 * @returns Strict VBox status code.
1841 * @param pIemCpu The IEM state.
1842 * @param pu64 Where to return the opcode qword.
1843 */
1844DECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1845{
1846 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1847 if (rcStrict == VINF_SUCCESS)
1848 {
1849 uint8_t offOpcode = pIemCpu->offOpcode;
1850 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1851 pIemCpu->abOpcode[offOpcode + 1],
1852 pIemCpu->abOpcode[offOpcode + 2],
1853 pIemCpu->abOpcode[offOpcode + 3],
1854 pIemCpu->abOpcode[offOpcode + 4],
1855 pIemCpu->abOpcode[offOpcode + 5],
1856 pIemCpu->abOpcode[offOpcode + 6],
1857 pIemCpu->abOpcode[offOpcode + 7]);
1858 pIemCpu->offOpcode = offOpcode + 8;
1859 }
1860 else
1861 *pu64 = 0;
1862 return rcStrict;
1863}
1864
1865
1866/**
1867 * Fetches the next opcode qword.
1868 *
1869 * @returns Strict VBox status code.
1870 * @param pIemCpu The IEM state.
1871 * @param pu64 Where to return the opcode qword.
1872 */
1873DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1874{
1875 uint8_t const offOpcode = pIemCpu->offOpcode;
1876 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1877 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1878
1879 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1880 pIemCpu->abOpcode[offOpcode + 1],
1881 pIemCpu->abOpcode[offOpcode + 2],
1882 pIemCpu->abOpcode[offOpcode + 3],
1883 pIemCpu->abOpcode[offOpcode + 4],
1884 pIemCpu->abOpcode[offOpcode + 5],
1885 pIemCpu->abOpcode[offOpcode + 6],
1886 pIemCpu->abOpcode[offOpcode + 7]);
1887 pIemCpu->offOpcode = offOpcode + 8;
1888 return VINF_SUCCESS;
1889}
1890
1891
1892/**
1893 * Fetches the next opcode quad word, returns automatically on failure.
1894 *
1895 * @param a_pu64 Where to return the opcode quad word.
1896 * @remark Implicitly references pIemCpu.
1897 */
1898#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1899 do \
1900 { \
1901 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1902 if (rcStrict2 != VINF_SUCCESS) \
1903 return rcStrict2; \
1904 } while (0)
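
/*
 * Illustration only (not an actual IEM opcode handler): a minimal sketch of how
 * the IEM_OPCODE_GET_NEXT_XXX macros above are meant to be combined inside a
 * decoder function returning VBOXSTRICTRC.  Each macro fetches the next opcode
 * bytes and returns the strict status code from the enclosing function on
 * failure, so only the success path needs to be spelled out:
 *
 *      static VBOXSTRICTRC iemOpExample_Decode(PIEMCPU pIemCpu)
 *      {
 *          int32_t  i32Disp;
 *          uint64_t u64Imm;
 *          IEM_OPCODE_GET_NEXT_S32(&i32Disp);          // 32-bit displacement
 *          IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);    // dword immediate, sign extended
 *          // ... use i32Disp and u64Imm ...
 *          return VINF_SUCCESS;
 *      }
 */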
1905
1906
1907/** @name Misc Worker Functions.
1908 * @{
1909 */
1910
1911
1912/**
1913 * Validates a new SS segment.
1914 *
1915 * @returns VBox strict status code.
1916 * @param pIemCpu The IEM per CPU instance data.
1917 * @param pCtx The CPU context.
1918 * @param NewSS The new SS selector.
1919 * @param uCpl The CPL to load the stack for.
1920 * @param pDesc Where to return the descriptor.
1921 */
1922static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1923{
1924 NOREF(pCtx);
1925
1926 /* Null selectors are not allowed (we're not called for dispatching
1927 interrupts with SS=0 in long mode). */
1928 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1929 {
1930 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1931 return iemRaiseTaskSwitchFault0(pIemCpu);
1932 }
1933
1934 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1935 if ((NewSS & X86_SEL_RPL) != uCpl)
1936 {
1937 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1938 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1939 }
1940
1941 /*
1942 * Read the descriptor.
1943 */
1944 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1945 if (rcStrict != VINF_SUCCESS)
1946 return rcStrict;
1947
1948 /*
1949 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1950 */
1951 if (!pDesc->Legacy.Gen.u1DescType)
1952 {
1953 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1954 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1955 }
1956
1957 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1958 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1959 {
1960 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1961 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1962 }
1963 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1964 {
1965 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1966 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1967 }
1968
1969 /* Is it there? */
1970 /** @todo testcase: Is this checked before the canonical / limit check below? */
1971 if (!pDesc->Legacy.Gen.u1Present)
1972 {
1973 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1974 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
1975 }
1976
1977 return VINF_SUCCESS;
1978}
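
/*
 * Illustration only: the privilege-change path in iemRaiseXcptOrIntInProtMode()
 * below uses this helper roughly like so (error handling abbreviated):
 *
 *      IEMSELDESC   DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // DescSS now holds the validated stack segment descriptor.
 */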
1979
1980
1981/**
1982 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1983 * not.
1984 *
1985 * @param a_pIemCpu The IEM per CPU data.
1986 * @param a_pCtx The CPU context.
1987 */
1988#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1989# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1990 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
1991 ? (a_pCtx)->eflags.u \
1992 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
1993#else
1994# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
1995 ( (a_pCtx)->eflags.u )
1996#endif
1997
1998/**
1999 * Updates the EFLAGS in the correct manner wrt. PATM.
2000 *
2001 * @param a_pIemCpu The IEM per CPU data.
2002 * @param a_pCtx The CPU context.
 * @param a_fEfl The new EFLAGS value.
2003 */
2004#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2005# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2006 do { \
2007 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2008 (a_pCtx)->eflags.u = (a_fEfl); \
2009 else \
2010 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2011 } while (0)
2012#else
2013# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2014 do { \
2015 (a_pCtx)->eflags.u = (a_fEfl); \
2016 } while (0)
2017#endif
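
/*
 * Typical usage (as seen in the exception delivery code further down): read the
 * live flags with IEMMISC_GET_EFL, modify them, and write them back with
 * IEMMISC_SET_EFL so that any PATM managed bits stay coherent:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */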
2018
2019
2020/** @} */
2021
2022/** @name Raising Exceptions.
2023 *
2024 * @{
2025 */
2026
2027/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2028 * @{ */
2029/** CPU exception. */
2030#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2031/** External interrupt (from PIC, APIC, whatever). */
2032#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2033/** Software interrupt (int or into, not bound).
2034 * Returns to the following instruction. */
2035#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2036/** Takes an error code. */
2037#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2038/** Takes a CR2. */
2039#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2040/** Generated by the breakpoint instruction. */
2041#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2042/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2043#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2044/** @} */
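
/*
 * Examples of flag combinations used with iemRaiseXcptOrInt() (see the
 * iemRaiseXxx helpers further down):
 *      #DE:            IEM_XCPT_FLAGS_T_CPU_XCPT
 *      #GP(n):         IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR
 *      #PF:            IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 *      software INT n: IEM_XCPT_FLAGS_T_SOFT_INT
 */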
2045
2046
2047/**
2048 * Loads the specified stack far pointer from the TSS.
2049 *
2050 * @returns VBox strict status code.
2051 * @param pIemCpu The IEM per CPU instance data.
2052 * @param pCtx The CPU context.
2053 * @param uCpl The CPL to load the stack for.
2054 * @param pSelSS Where to return the new stack segment.
2055 * @param puEsp Where to return the new stack pointer.
2056 */
2057static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2058 PRTSEL pSelSS, uint32_t *puEsp)
2059{
2060 VBOXSTRICTRC rcStrict;
2061 Assert(uCpl < 4);
2062 *puEsp = 0; /* make gcc happy */
2063 *pSelSS = 0; /* make gcc happy */
2064
2065 switch (pCtx->tr.Attr.n.u4Type)
2066 {
2067 /*
2068 * 16-bit TSS (X86TSS16).
2069 */
2070 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2071 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2072 {
2073 uint32_t off = uCpl * 4 + 2;
2074 if (off + 4 > pCtx->tr.u32Limit)
2075 {
2076 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2077 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2078 }
2079
2080 uint32_t u32Tmp = 0; /* gcc maybe... */
2081 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2082 if (rcStrict == VINF_SUCCESS)
2083 {
2084 *puEsp = RT_LOWORD(u32Tmp);
2085 *pSelSS = RT_HIWORD(u32Tmp);
2086 return VINF_SUCCESS;
2087 }
2088 break;
2089 }
2090
2091 /*
2092 * 32-bit TSS (X86TSS32).
2093 */
2094 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2095 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2096 {
2097 uint32_t off = uCpl * 8 + 4;
2098 if (off + 7 > pCtx->tr.u32Limit)
2099 {
2100 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2101 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2102 }
2103
2104 uint64_t u64Tmp;
2105 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2106 if (rcStrict == VINF_SUCCESS)
2107 {
2108 *puEsp = u64Tmp & UINT32_MAX;
2109 *pSelSS = (RTSEL)(u64Tmp >> 32);
2110 return VINF_SUCCESS;
2111 }
2112 break;
2113 }
2114
2115 default:
2116 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2117 }
2118 return rcStrict;
2119}
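
/*
 * Worked example: with uCpl=1 the 16-bit TSS case above reads the four bytes at
 * offset 1*4+2 = 6, i.e. the sp1/ss1 pair, while the 32-bit TSS case reads the
 * eight bytes at offset 1*8+4 = 12, i.e. esp1 followed by ss1.
 */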
2120
2121
2122/**
2123 * Loads the specified stack pointer from the 64-bit TSS.
2124 *
2125 * @returns VBox strict status code.
2126 * @param pIemCpu The IEM per CPU instance data.
2127 * @param pCtx The CPU context.
2128 * @param uCpl The CPL to load the stack for.
2129 * @param uIst The interrupt stack table index, 0 to use the uCpl stack.
2130 * @param puRsp Where to return the new stack pointer.
2131 */
2132static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
2133 uint64_t *puRsp)
2134{
2135 Assert(uCpl < 4);
2136 Assert(uIst < 8);
2137 *puRsp = 0; /* make gcc happy */
2138
2139 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
2140
2141 uint32_t off;
2142 if (uIst)
2143 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2144 else
2145 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2146 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2147 {
2148 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2149 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2150 }
2151
2152 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2153}
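
/*
 * Worked example: uIst=0 with uCpl=2 reads the rsp2 field of the 64-bit TSS
 * (off = 2*8 + RT_OFFSETOF(X86TSS64, rsp0)), while uIst=3 reads the ist3 field
 * regardless of uCpl (off = 2*8 + RT_OFFSETOF(X86TSS64, ist1)).
 */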
2154
2155
2156/**
2157 * Adjust the CPU state according to the exception being raised.
2158 *
2159 * @param pCtx The CPU context.
2160 * @param u8Vector The exception that has been raised.
2161 */
2162DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2163{
2164 switch (u8Vector)
2165 {
2166 case X86_XCPT_DB:
2167 pCtx->dr[7] &= ~X86_DR7_GD;
2168 break;
2169 /** @todo Read the AMD and Intel exception reference... */
2170 }
2171}
2172
2173
2174/**
2175 * Implements exceptions and interrupts for real mode.
2176 *
2177 * @returns VBox strict status code.
2178 * @param pIemCpu The IEM per CPU instance data.
2179 * @param pCtx The CPU context.
2180 * @param cbInstr The number of bytes to offset rIP by in the return
2181 * address.
2182 * @param u8Vector The interrupt / exception vector number.
2183 * @param fFlags The flags.
2184 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2185 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2186 */
2187static VBOXSTRICTRC
2188iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2189 PCPUMCTX pCtx,
2190 uint8_t cbInstr,
2191 uint8_t u8Vector,
2192 uint32_t fFlags,
2193 uint16_t uErr,
2194 uint64_t uCr2)
2195{
2196 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
2197 NOREF(uErr); NOREF(uCr2);
2198
2199 /*
2200 * Read the IDT entry.
2201 */
2202 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2203 {
2204 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2205 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2206 }
2207 RTFAR16 Idte;
2208 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2209 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2210 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2211 return rcStrict;
2212
2213 /*
2214 * Push the stack frame.
2215 */
2216 uint16_t *pu16Frame;
2217 uint64_t uNewRsp;
2218 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2219 if (rcStrict != VINF_SUCCESS)
2220 return rcStrict;
2221
2222 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
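    /* Frame layout, from the new top of the stack upwards: return IP, return CS, FLAGS. */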
2223 pu16Frame[2] = (uint16_t)fEfl;
2224 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2225 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2226 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2227 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2228 return rcStrict;
2229
2230 /*
2231 * Load the vector address into cs:ip and make exception specific state
2232 * adjustments.
2233 */
2234 pCtx->cs.Sel = Idte.sel;
2235 pCtx->cs.ValidSel = Idte.sel;
2236 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2237 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2238 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2239 pCtx->rip = Idte.off;
2240 fEfl &= ~X86_EFL_IF;
2241 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2242
2243 /** @todo do we actually do this in real mode? */
2244 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2245 iemRaiseXcptAdjustState(pCtx, u8Vector);
2246
2247 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2248}
2249
2250
2251/**
2252 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2253 *
2254 * @param pIemCpu The IEM per CPU instance data.
2255 * @param pSReg Pointer to the segment register.
2256 */
2257static void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2258{
2259 pSReg->Sel = 0;
2260 pSReg->ValidSel = 0;
2261 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2262 {
2263 /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes. */
2264 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2265 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2266 }
2267 else
2268 {
2269 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2270 /** @todo check this on AMD-V */
2271 pSReg->u64Base = 0;
2272 pSReg->u32Limit = 0;
2273 }
2274}
2275
2276
2277/**
2278 * Implements exceptions and interrupts for protected mode.
2279 *
2280 * @returns VBox strict status code.
2281 * @param pIemCpu The IEM per CPU instance data.
2282 * @param pCtx The CPU context.
2283 * @param cbInstr The number of bytes to offset rIP by in the return
2284 * address.
2285 * @param u8Vector The interrupt / exception vector number.
2286 * @param fFlags The flags.
2287 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2288 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2289 */
2290static VBOXSTRICTRC
2291iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
2292 PCPUMCTX pCtx,
2293 uint8_t cbInstr,
2294 uint8_t u8Vector,
2295 uint32_t fFlags,
2296 uint16_t uErr,
2297 uint64_t uCr2)
2298{
2299 NOREF(cbInstr);
2300
2301 /*
2302 * Read the IDT entry.
2303 */
2304 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2305 {
2306 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2307 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2308 }
2309 X86DESC Idte;
2310 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
2311 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
2312 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2313 return rcStrict;
2314 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2315 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2316 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2317
2318 /*
2319 * Check the descriptor type, DPL and such.
2320 * ASSUMES this is done in the same order as described for call-gate calls.
2321 */
2322 if (Idte.Gate.u1DescType)
2323 {
2324 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2325 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2326 }
2327 uint8_t f32BitGate = true;
2328 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2329 switch (Idte.Gate.u4Type)
2330 {
2331 case X86_SEL_TYPE_SYS_UNDEFINED:
2332 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2333 case X86_SEL_TYPE_SYS_LDT:
2334 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2335 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2336 case X86_SEL_TYPE_SYS_UNDEFINED2:
2337 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2338 case X86_SEL_TYPE_SYS_UNDEFINED3:
2339 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2340 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2341 case X86_SEL_TYPE_SYS_UNDEFINED4:
2342 {
2343 /** @todo check what actually happens when the type is wrong...
2344 * esp. call gates. */
2345 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2346 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2347 }
2348
2349 case X86_SEL_TYPE_SYS_286_INT_GATE:
2350 f32BitGate = false;
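            /* fall thru */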
2351 case X86_SEL_TYPE_SYS_386_INT_GATE:
2352 fEflToClear |= X86_EFL_IF;
2353 break;
2354
2355 case X86_SEL_TYPE_SYS_TASK_GATE:
2356 /** @todo task gates. */
2357 AssertFailedReturn(VERR_NOT_SUPPORTED);
2358
2359 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2360 f32BitGate = false;
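            /* fall thru */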
2361 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2362 break;
2363
2364 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2365 }
2366
2367 /* Check DPL against CPL if applicable. */
2368 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2369 {
2370 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2371 {
2372 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2373 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2374 }
2375 }
2376
2377 /* Is it there? */
2378 if (!Idte.Gate.u1Present)
2379 {
2380 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2381 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2382 }
2383
2384 /* A null CS is bad. */
2385 RTSEL NewCS = Idte.Gate.u16Sel;
2386 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2387 {
2388 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2389 return iemRaiseGeneralProtectionFault0(pIemCpu);
2390 }
2391
2392 /* Fetch the descriptor for the new CS. */
2393 IEMSELDESC DescCS;
2394 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
2395 if (rcStrict != VINF_SUCCESS)
2396 {
2397 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2398 return rcStrict;
2399 }
2400
2401 /* Must be a code segment. */
2402 if (!DescCS.Legacy.Gen.u1DescType)
2403 {
2404 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2405 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2406 }
2407 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2408 {
2409 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2410 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2411 }
2412
2413 /* Don't allow lowering the privilege level. */
2414 /** @todo Does the lowering of privileges apply to software interrupts
2415 * only? This has bearings on the more-privileged or
2416 * same-privilege stack behavior further down. A testcase would
2417 * be nice. */
2418 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2419 {
2420 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2421 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2422 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2423 }
2424
2425 /* Make sure the selector is present. */
2426 if (!DescCS.Legacy.Gen.u1Present)
2427 {
2428 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2429 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2430 }
2431
2432 /* Check the new EIP against the new CS limit. */
2433 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2434 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2435 ? Idte.Gate.u16OffsetLow
2436 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2437 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2438 if (uNewEip > cbLimitCS)
2439 {
2440 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
2441 u8Vector, uNewEip, cbLimitCS, NewCS));
2442 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
2443 }
2444
2445 /* Calc the flag image to push. */
2446 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2447 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
2448 fEfl &= ~X86_EFL_RF;
2449 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2450 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
2451
2452 /* From V8086 mode only go to CPL 0. */
2453 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2454 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2455 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
2456 {
2457 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
2458 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
2459 }
2460
2461 /*
2462 * If the privilege level changes, we need to get a new stack from the TSS.
2463 * This in turns means validating the new SS and ESP...
2464 */
2465 if (uNewCpl != pIemCpu->uCpl)
2466 {
2467 RTSEL NewSS;
2468 uint32_t uNewEsp;
2469 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
2470 if (rcStrict != VINF_SUCCESS)
2471 return rcStrict;
2472
2473 IEMSELDESC DescSS;
2474 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
2475 if (rcStrict != VINF_SUCCESS)
2476 return rcStrict;
2477
2478 /* Check that there is sufficient space for the stack frame. */
2479 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2480 if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
2481 {
2482 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2483 }
2484
2485 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
2486 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
2487 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
2488 if ( uNewEsp - 1 > cbLimitSS
2489 || uNewEsp < cbStackFrame)
2490 {
2491 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
2492 u8Vector, NewSS, uNewEsp, cbStackFrame));
2493 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2494 }
2495
2496 /*
2497 * Start making changes.
2498 */
2499
2500 /* Create the stack frame. */
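        /* Frame layout, from the new stack top upwards: [error code,] EIP, CS, EFLAGS,
           old ESP, old SS, and for V8086 interruption also ES, DS, FS, GS. */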
2501 RTPTRUNION uStackFrame;
2502 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2503 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2504 if (rcStrict != VINF_SUCCESS)
2505 return rcStrict;
2506 void * const pvStackFrame = uStackFrame.pv;
2507 if (f32BitGate)
2508 {
2509 if (fFlags & IEM_XCPT_FLAGS_ERR)
2510 *uStackFrame.pu32++ = uErr;
2511 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
2512 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2513 uStackFrame.pu32[2] = fEfl;
2514 uStackFrame.pu32[3] = pCtx->esp;
2515 uStackFrame.pu32[4] = pCtx->ss.Sel;
2516 if (fEfl & X86_EFL_VM)
2517 {
2518 uStackFrame.pu32[1] = pCtx->cs.Sel;
2519 uStackFrame.pu32[5] = pCtx->es.Sel;
2520 uStackFrame.pu32[6] = pCtx->ds.Sel;
2521 uStackFrame.pu32[7] = pCtx->fs.Sel;
2522 uStackFrame.pu32[8] = pCtx->gs.Sel;
2523 }
2524 }
2525 else
2526 {
2527 if (fFlags & IEM_XCPT_FLAGS_ERR)
2528 *uStackFrame.pu16++ = uErr;
2529 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2530 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2531 uStackFrame.pu16[2] = fEfl;
2532 uStackFrame.pu16[3] = pCtx->sp;
2533 uStackFrame.pu16[4] = pCtx->ss.Sel;
2534 if (fEfl & X86_EFL_VM)
2535 {
2536 uStackFrame.pu16[1] = pCtx->cs.Sel;
2537 uStackFrame.pu16[5] = pCtx->es.Sel;
2538 uStackFrame.pu16[6] = pCtx->ds.Sel;
2539 uStackFrame.pu16[7] = pCtx->fs.Sel;
2540 uStackFrame.pu16[8] = pCtx->gs.Sel;
2541 }
2542 }
2543 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2544 if (rcStrict != VINF_SUCCESS)
2545 return rcStrict;
2546
2547 /* Mark the selectors 'accessed' (hope this is the correct time). */
2548 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2549 * after pushing the stack frame? (Write protect the gdt + stack to
2550 * find out.) */
2551 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2552 {
2553 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2554 if (rcStrict != VINF_SUCCESS)
2555 return rcStrict;
2556 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2557 }
2558
2559 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2560 {
2561 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2562 if (rcStrict != VINF_SUCCESS)
2563 return rcStrict;
2564 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2565 }
2566
2567 /*
2568 * Start committing the register changes (joins with the DPL=CPL branch).
2569 */
2570 pCtx->ss.Sel = NewSS;
2571 pCtx->ss.ValidSel = NewSS;
2572 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2573 pCtx->ss.u32Limit = cbLimitSS;
2574 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2575 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2576 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2577 pIemCpu->uCpl = uNewCpl;
2578
2579 if (fEfl & X86_EFL_VM)
2580 {
2581 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
2582 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
2583 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
2584 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
2585 }
2586 }
2587 /*
2588 * Same privilege, no stack change and smaller stack frame.
2589 */
2590 else
2591 {
2592 uint64_t uNewRsp;
2593 RTPTRUNION uStackFrame;
2594 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
2595 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2596 if (rcStrict != VINF_SUCCESS)
2597 return rcStrict;
2598 void * const pvStackFrame = uStackFrame.pv;
2599
2600 if (f32BitGate)
2601 {
2602 if (fFlags & IEM_XCPT_FLAGS_ERR)
2603 *uStackFrame.pu32++ = uErr;
2604 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
2605 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2606 uStackFrame.pu32[2] = fEfl;
2607 }
2608 else
2609 {
2610 if (fFlags & IEM_XCPT_FLAGS_ERR)
2611 *uStackFrame.pu16++ = uErr;
2612 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
2613 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2614 uStackFrame.pu16[2] = fEfl;
2615 }
2616 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2617 if (rcStrict != VINF_SUCCESS)
2618 return rcStrict;
2619
2620 /* Mark the CS selector as 'accessed'. */
2621 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2622 {
2623 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2624 if (rcStrict != VINF_SUCCESS)
2625 return rcStrict;
2626 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2627 }
2628
2629 /*
2630 * Start committing the register changes (joins with the other branch).
2631 */
2632 pCtx->rsp = uNewRsp;
2633 }
2634
2635 /* ... register committing continues. */
2636 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2637 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2638 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2639 pCtx->cs.u32Limit = cbLimitCS;
2640 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2641 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2642
2643 pCtx->rip = uNewEip;
2644 fEfl &= ~fEflToClear;
2645 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2646
2647 if (fFlags & IEM_XCPT_FLAGS_CR2)
2648 pCtx->cr2 = uCr2;
2649
2650 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2651 iemRaiseXcptAdjustState(pCtx, u8Vector);
2652
2653 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2654}
2655
2656
2657/**
2658 * Implements exceptions and interrupts for long mode.
2659 *
2660 * @returns VBox strict status code.
2661 * @param pIemCpu The IEM per CPU instance data.
2662 * @param pCtx The CPU context.
2663 * @param cbInstr The number of bytes to offset rIP by in the return
2664 * address.
2665 * @param u8Vector The interrupt / exception vector number.
2666 * @param fFlags The flags.
2667 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2668 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2669 */
2670static VBOXSTRICTRC
2671iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
2672 PCPUMCTX pCtx,
2673 uint8_t cbInstr,
2674 uint8_t u8Vector,
2675 uint32_t fFlags,
2676 uint16_t uErr,
2677 uint64_t uCr2)
2678{
2679 NOREF(cbInstr);
2680
2681 /*
2682 * Read the IDT entry.
2683 */
2684 uint16_t offIdt = (uint16_t)u8Vector << 4;
2685 if (pCtx->idtr.cbIdt < offIdt + 7)
2686 {
2687 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2688 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2689 }
2690 X86DESC64 Idte;
2691 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
2692 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2693 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
2694 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2695 return rcStrict;
2696 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
2697 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2698 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2699
2700 /*
2701 * Check the descriptor type, DPL and such.
2702 * ASSUMES this is done in the same order as described for call-gate calls.
2703 */
2704 if (Idte.Gate.u1DescType)
2705 {
2706 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2707 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2708 }
2709 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2710 switch (Idte.Gate.u4Type)
2711 {
2712 case AMD64_SEL_TYPE_SYS_INT_GATE:
2713 fEflToClear |= X86_EFL_IF;
2714 break;
2715 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
2716 break;
2717
2718 default:
2719 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2720 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2721 }
2722
2723 /* Check DPL against CPL if applicable. */
2724 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2725 {
2726 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
2727 {
2728 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2729 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2730 }
2731 }
2732
2733 /* Is it there? */
2734 if (!Idte.Gate.u1Present)
2735 {
2736 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
2737 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2738 }
2739
2740 /* A null CS is bad. */
2741 RTSEL NewCS = Idte.Gate.u16Sel;
2742 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2743 {
2744 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2745 return iemRaiseGeneralProtectionFault0(pIemCpu);
2746 }
2747
2748 /* Fetch the descriptor for the new CS. */
2749 IEMSELDESC DescCS;
2750 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
2751 if (rcStrict != VINF_SUCCESS)
2752 {
2753 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2754 return rcStrict;
2755 }
2756
2757 /* Must be a 64-bit code segment. */
2758 if (!DescCS.Long.Gen.u1DescType)
2759 {
2760 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2761 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2762 }
2763 if ( !DescCS.Long.Gen.u1Long
2764 || DescCS.Long.Gen.u1DefBig
2765 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
2766 {
2767 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
2768 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
2769 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2770 }
2771
2772 /* Don't allow lowering the privilege level. For non-conforming CS
2773 selectors, the CS.DPL sets the privilege level the trap/interrupt
2774 handler runs at. For conforming CS selectors, the CPL remains
2775 unchanged, but the CS.DPL must be <= CPL. */
2776 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
2777 * when CPU in Ring-0. Result \#GP? */
2778 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
2779 {
2780 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2781 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2782 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2783 }
2784
2785
2786 /* Make sure the selector is present. */
2787 if (!DescCS.Legacy.Gen.u1Present)
2788 {
2789 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2790 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2791 }
2792
2793 /* Check that the new RIP is canonical. */
2794 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
2795 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
2796 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
2797 if (!IEM_IS_CANONICAL(uNewRip))
2798 {
2799 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
2800 return iemRaiseGeneralProtectionFault0(pIemCpu);
2801 }
2802
2803 /*
2804 * If the privilege level changes or if the IST isn't zero, we need to get
2805 * a new stack from the TSS.
2806 */
2807 uint64_t uNewRsp;
2808 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2809 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
2810 if ( uNewCpl != pIemCpu->uCpl
2811 || Idte.Gate.u3IST != 0)
2812 {
2813 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
2814 if (rcStrict != VINF_SUCCESS)
2815 return rcStrict;
2816 }
2817 else
2818 uNewRsp = pCtx->rsp;
2819 uNewRsp &= ~(uint64_t)0xf;
2820
2821 /*
2822 * Calc the flag image to push.
2823 */
2824 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2825 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
2826 fEfl &= ~X86_EFL_RF;
2827 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2828 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
2829
2830 /*
2831 * Start making changes.
2832 */
2833
2834 /* Create the stack frame. */
2835 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
2836 RTPTRUNION uStackFrame;
2837 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
2838 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
2839 if (rcStrict != VINF_SUCCESS)
2840 return rcStrict;
2841 void * const pvStackFrame = uStackFrame.pv;
2842
2843 if (fFlags & IEM_XCPT_FLAGS_ERR)
2844 *uStackFrame.pu64++ = uErr;
2845 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
2846 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
2847 uStackFrame.pu64[2] = fEfl;
2848 uStackFrame.pu64[3] = pCtx->rsp;
2849 uStackFrame.pu64[4] = pCtx->ss.Sel;
2850 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
2851 if (rcStrict != VINF_SUCCESS)
2852 return rcStrict;
2853
2854 /* Mark the CS selector 'accessed' (hope this is the correct time). */
2855 /** @todo testcase: exactly _when_ are the accessed bits set - before or
2856 * after pushing the stack frame? (Write protect the gdt + stack to
2857 * find out.) */
2858 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2859 {
2860 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2861 if (rcStrict != VINF_SUCCESS)
2862 return rcStrict;
2863 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2864 }
2865
2866 /*
2867 * Start committing the register changes.
2868 */
2869 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
2870 * hidden registers when interrupting 32-bit or 16-bit code! */
2871 if (uNewCpl != pIemCpu->uCpl)
2872 {
2873 pCtx->ss.Sel = 0 | uNewCpl;
2874 pCtx->ss.ValidSel = 0 | uNewCpl;
2875 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2876 pCtx->ss.u32Limit = UINT32_MAX;
2877 pCtx->ss.u64Base = 0;
2878 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
2879 }
2880 pCtx->rsp = uNewRsp - cbStackFrame;
2881 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2882 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2883 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2884 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
2885 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2886 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2887 pCtx->rip = uNewRip;
2888 pIemCpu->uCpl = uNewCpl;
2889
2890 fEfl &= ~fEflToClear;
2891 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2892
2893 if (fFlags & IEM_XCPT_FLAGS_CR2)
2894 pCtx->cr2 = uCr2;
2895
2896 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2897 iemRaiseXcptAdjustState(pCtx, u8Vector);
2898
2899 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2900}
2901
2902
2903/**
2904 * Implements exceptions and interrupts.
2905 *
2906 * All exceptions and interrupts go through this function!
2907 *
2908 * @returns VBox strict status code.
2909 * @param pIemCpu The IEM per CPU instance data.
2910 * @param cbInstr The number of bytes to offset rIP by in the return
2911 * address.
2912 * @param u8Vector The interrupt / exception vector number.
2913 * @param fFlags The flags.
2914 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2915 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2916 */
2917DECL_NO_INLINE(static, VBOXSTRICTRC)
2918iemRaiseXcptOrInt(PIEMCPU pIemCpu,
2919 uint8_t cbInstr,
2920 uint8_t u8Vector,
2921 uint32_t fFlags,
2922 uint16_t uErr,
2923 uint64_t uCr2)
2924{
2925 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2926
2927 /*
2928 * Perform the V8086 IOPL check and upgrade the fault without nesting.
2929 */
2930 if ( pCtx->eflags.Bits.u1VM
2931 && pCtx->eflags.Bits.u2IOPL != 3
2932 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2933 && (pCtx->cr0 & X86_CR0_PE) )
2934 {
2935 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
2936 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
2937 u8Vector = X86_XCPT_GP;
2938 uErr = 0;
2939 }
2940#ifdef DBGFTRACE_ENABLED
2941 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
2942 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
2943 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
2944#endif
2945
2946 /*
2947 * Do recursion accounting.
2948 */
2949 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
2950 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
2951 if (pIemCpu->cXcptRecursions == 0)
2952 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2953 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
2954 else
2955 {
2956 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2957 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
2958
2959 /** @todo double and triple faults. */
2960 if (pIemCpu->cXcptRecursions >= 3)
2961 {
2962#ifdef DEBUG_bird
2963 AssertFailed();
2964#endif
2965 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2966 }
2967
2968 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
2969 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
2970 {
2971 ....
2972 } */
2973 }
2974 pIemCpu->cXcptRecursions++;
2975 pIemCpu->uCurXcpt = u8Vector;
2976 pIemCpu->fCurXcpt = fFlags;
2977
2978 /*
2979 * Extensive logging.
2980 */
2981#if defined(LOG_ENABLED) && defined(IN_RING3)
2982 if (LogIs3Enabled())
2983 {
2984 PVM pVM = IEMCPU_TO_VM(pIemCpu);
2985 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2986 char szRegs[4096];
2987 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2988 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2989 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2990 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2991 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2992 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2993 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2994 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2995 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2996 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2997 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2998 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2999 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3000 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3001 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3002 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3003 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3004 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3005 " efer=%016VR{efer}\n"
3006 " pat=%016VR{pat}\n"
3007 " sf_mask=%016VR{sf_mask}\n"
3008 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3009 " lstar=%016VR{lstar}\n"
3010 " star=%016VR{star} cstar=%016VR{cstar}\n"
3011 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3012 );
3013
3014 char szInstr[256];
3015 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3016 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3017 szInstr, sizeof(szInstr), NULL);
3018 Log3(("%s%s\n", szRegs, szInstr));
3019 }
3020#endif /* LOG_ENABLED */
3021
3022 /*
3023 * Call the mode specific worker function.
3024 */
3025 VBOXSTRICTRC rcStrict;
3026 if (!(pCtx->cr0 & X86_CR0_PE))
3027 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3028 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
3029 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3030 else
3031 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
3032
3033 /*
3034 * Unwind.
3035 */
3036 pIemCpu->cXcptRecursions--;
3037 pIemCpu->uCurXcpt = uPrevXcpt;
3038 pIemCpu->fCurXcpt = fPrevXcpt;
3039 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
3040 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
3041 return rcStrict;
3042}
3043
3044
3045/** \#DE - 00. */
3046DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
3047{
3048 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3049}
3050
3051
3052/** \#DB - 01.
3053 * @note This automatically clears DR7.GD. */
3054DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
3055{
3056 /** @todo set/clear RF. */
3057 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
3058 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3059}
3060
3061
3062/** \#UD - 06. */
3063DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
3064{
3065 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3066}
3067
3068
3069/** \#NM - 07. */
3070DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
3071{
3072 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3073}
3074
3075
3076#ifdef SOME_UNUSED_FUNCTION
3077/** \#TS(err) - 0a. */
3078DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
3079{
3080 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3081}
3082#endif
3083
3084
3085/** \#TS(tr) - 0a. */
3086DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
3087{
3088 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3089 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
3090}
3091
3092
3093/** \#TS(0) - 0a. */
3094DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
3095{
3096 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3097 0, 0);
3098}
3099
3100
3101/** \#TS(err) - 0a. */
3102DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
3103{
3104 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3105 uSel & X86_SEL_MASK_OFF_RPL, 0);
3106}
3107
3108
3109/** \#NP(err) - 0b. */
3110DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
3111{
3112 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3113}
3114
3115
3116/** \#NP(seg) - 0b. */
3117DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
3118{
3119 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3120 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
3121}
3122
3123
3124/** \#NP(sel) - 0b. */
3125DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
3126{
3127 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3128 uSel & ~X86_SEL_RPL, 0);
3129}
3130
3131
3132/** \#SS(seg) - 0c. */
3133DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
3134{
3135 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3136 uSel & ~X86_SEL_RPL, 0);
3137}
3138
3139
3140/** \#GP(n) - 0d. */
3141DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
3142{
3143 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3144}
3145
3146
3147/** \#GP(0) - 0d. */
3148DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
3149{
3150 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3151}
3152
3153
3154/** \#GP(sel) - 0d. */
3155DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
3156{
3157 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3158 Sel & ~X86_SEL_RPL, 0);
3159}
3160
3161
3162/** \#GP(0) - 0d. */
3163DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
3164{
3165 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3166}
3167
3168
3169/** \#GP(sel) - 0d. */
3170DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
3171{
3172 NOREF(iSegReg); NOREF(fAccess);
3173 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
3174 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3175}
3176
3177
3178/** \#GP(sel) - 0d. */
3179DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
3180{
3181 NOREF(Sel);
3182 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3183}
3184
3185
3186/** \#GP(sel) - 0d. */
3187DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
3188{
3189 NOREF(iSegReg); NOREF(fAccess);
3190 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3191}
3192
3193
3194/** \#PF(n) - 0e. */
3195DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
3196{
3197 uint16_t uErr;
3198 switch (rc)
3199 {
3200 case VERR_PAGE_NOT_PRESENT:
3201 case VERR_PAGE_TABLE_NOT_PRESENT:
3202 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
3203 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
3204 uErr = 0;
3205 break;
3206
3207 default:
3208 AssertMsgFailed(("%Rrc\n", rc));
3209 case VERR_ACCESS_DENIED:
3210 uErr = X86_TRAP_PF_P;
3211 break;
3212
3213 /** @todo reserved */
3214 }
3215
3216 if (pIemCpu->uCpl == 3)
3217 uErr |= X86_TRAP_PF_US;
3218
3219 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
3220 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
3221 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
3222 uErr |= X86_TRAP_PF_ID;
3223
3224#if 0 /* This is so much nonsense, really. Why was it done like that? */
3225 /* Note! RW access callers reporting a WRITE protection fault, will clear
3226 the READ flag before calling. So, read-modify-write accesses (RW)
3227 can safely be reported as READ faults. */
3228 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
3229 uErr |= X86_TRAP_PF_RW;
3230#else
3231 if (fAccess & IEM_ACCESS_TYPE_WRITE)
3232 {
3233 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
3234 uErr |= X86_TRAP_PF_RW;
3235 }
3236#endif
3237
3238 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
3239 uErr, GCPtrWhere);
3240}
3241
3242
3243/** \#MF(0) - 10. */
3244DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
3245{
3246 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3247}
3248
3249
3250/** \#AC(0) - 11. */
3251DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
3252{
3253 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3254}
3255
3256
3257/**
3258 * Macro for calling iemCImplRaiseDivideError().
3259 *
3260 * This enables us to add/remove arguments and force different levels of
3261 * inlining as we wish.
3262 *
3263 * @return Strict VBox status code.
3264 */
3265#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
3266IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
3267{
3268 NOREF(cbInstr);
3269 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3270}
3271
3272
3273/**
3274 * Macro for calling iemCImplRaiseInvalidLockPrefix().
3275 *
3276 * This enables us to add/remove arguments and force different levels of
3277 * inlining as we wish.
3278 *
3279 * @return Strict VBox status code.
3280 */
3281#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
3282IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
3283{
3284 NOREF(cbInstr);
3285 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3286}
3287
3288
3289/**
3290 * Macro for calling iemCImplRaiseInvalidOpcode().
3291 *
3292 * This enables us to add/remove arguments and force different levels of
3293 * inlining as we wish.
3294 *
3295 * @return Strict VBox status code.
3296 */
3297#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
3298IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
3299{
3300 NOREF(cbInstr);
3301 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3302}
3303
3304
3305/** @} */
3306
3307
3308/*
3309 *
3310 * Helper routines.
3311 * Helper routines.
3312 * Helper routines.
3313 *
3314 */
3315
3316/**
3317 * Recalculates the effective operand size.
3318 *
3319 * @param pIemCpu The IEM state.
3320 */
3321static void iemRecalEffOpSize(PIEMCPU pIemCpu)
3322{
3323 switch (pIemCpu->enmCpuMode)
3324 {
3325 case IEMMODE_16BIT:
3326 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
3327 break;
3328 case IEMMODE_32BIT:
3329 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
3330 break;
3331 case IEMMODE_64BIT:
3332 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
3333 {
3334 case 0:
3335 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
3336 break;
3337 case IEM_OP_PRF_SIZE_OP:
3338 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3339 break;
3340 case IEM_OP_PRF_SIZE_REX_W:
3341 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
3342 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3343 break;
3344 }
3345 break;
3346 default:
3347 AssertFailed();
3348 }
3349}
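/* Editor's note: a minimal sketch (not part of the original file) of what the
 * switch above computes. In practice the decoder fills in these fields; they
 * are assigned directly here only to illustrate that REX.W wins over the 0x66
 * operand-size prefix in 64-bit mode. */
#if 0 /* illustration only */
    pIemCpu->enmCpuMode = IEMMODE_64BIT;
    pIemCpu->fPrefixes  = IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pIemCpu);
    Assert(pIemCpu->enmEffOpSize == IEMMODE_64BIT); /* 0x66 is ignored when REX.W is present. */
#endif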
3350
3351
3352/**
3353 * Sets the default operand size to 64-bit and recalculates the effective
3354 * operand size.
3355 *
3356 * @param pIemCpu The IEM state.
3357 */
3358static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
3359{
3360 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3361 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
3362 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
3363 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
3364 else
3365 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
3366}
3367
3368
3369/*
3370 *
3371 * Common opcode decoders.
3372 * Common opcode decoders.
3373 * Common opcode decoders.
3374 *
3375 */
3376//#include <iprt/mem.h>
3377
3378/**
3379 * Used to add extra details about a stub case.
3380 * @param pIemCpu The IEM per CPU state.
3381 */
3382static void iemOpStubMsg2(PIEMCPU pIemCpu)
3383{
3384#if defined(LOG_ENABLED) && defined(IN_RING3)
3385 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3386 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3387 char szRegs[4096];
3388 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3389 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3390 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3391 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3392 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3393 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3394 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3395 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3396 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3397 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3398 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3399 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3400 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3401 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3402 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3403 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3404 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3405 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3406 " efer=%016VR{efer}\n"
3407 " pat=%016VR{pat}\n"
3408 " sf_mask=%016VR{sf_mask}\n"
3409 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3410 " lstar=%016VR{lstar}\n"
3411 " star=%016VR{star} cstar=%016VR{cstar}\n"
3412 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3413 );
3414
3415 char szInstr[256];
3416 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3417 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3418 szInstr, sizeof(szInstr), NULL);
3419
3420 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
3421#else
3422 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
3423#endif
3424}
3425
3426/**
3427 * Complains about a stub.
3428 *
3429 * Providing two versions of this macro, one for daily use and one for use when
3430 * working on IEM.
3431 */
3432#if 0
3433# define IEMOP_BITCH_ABOUT_STUB() \
3434 do { \
3435 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
3436 iemOpStubMsg2(pIemCpu); \
3437 RTAssertPanic(); \
3438 } while (0)
3439#else
3440# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
3441#endif
3442
3443/** Stubs an opcode. */
3444#define FNIEMOP_STUB(a_Name) \
3445 FNIEMOP_DEF(a_Name) \
3446 { \
3447 IEMOP_BITCH_ABOUT_STUB(); \
3448 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3449 } \
3450 typedef int ignore_semicolon
3451
3452/** Stubs an opcode. */
3453#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
3454 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3455 { \
3456 IEMOP_BITCH_ABOUT_STUB(); \
3457 NOREF(a_Name0); \
3458 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
3459 } \
3460 typedef int ignore_semicolon
3461
3462/** Stubs an opcode which currently should raise \#UD. */
3463#define FNIEMOP_UD_STUB(a_Name) \
3464 FNIEMOP_DEF(a_Name) \
3465 { \
3466 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3467 return IEMOP_RAISE_INVALID_OPCODE(); \
3468 } \
3469 typedef int ignore_semicolon
3470
3471/** Stubs an opcode which currently should raise \#UD. */
3472#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
3473 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3474 { \
3475 NOREF(a_Name0); \
3476 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
3477 return IEMOP_RAISE_INVALID_OPCODE(); \
3478 } \
3479 typedef int ignore_semicolon
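/* Editor's note: the trailing "typedef int ignore_semicolon" in the stub
 * macros above is there so that an invocation can be written with a
 * terminating semicolon, e.g. "FNIEMOP_STUB(iemOp_SomeUnimplementedOpcode);"
 * (hypothetical name), without the compiler warning about an empty
 * declaration. */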
3480
3481
3482
3483/** @name Register Access.
3484 * @{
3485 */
3486
3487/**
3488 * Gets a reference (pointer) to the specified hidden segment register.
3489 *
3490 * @returns Hidden register reference.
3491 * @param pIemCpu The per CPU data.
3492 * @param iSegReg The segment register.
3493 */
3494static PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
3495{
3496 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3497 PCPUMSELREG pSReg;
3498 switch (iSegReg)
3499 {
3500 case X86_SREG_ES: pSReg = &pCtx->es; break;
3501 case X86_SREG_CS: pSReg = &pCtx->cs; break;
3502 case X86_SREG_SS: pSReg = &pCtx->ss; break;
3503 case X86_SREG_DS: pSReg = &pCtx->ds; break;
3504 case X86_SREG_FS: pSReg = &pCtx->fs; break;
3505 case X86_SREG_GS: pSReg = &pCtx->gs; break;
3506 default:
3507 AssertFailedReturn(NULL);
3508 }
3509#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3510 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
3511 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
3512#else
3513 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
3514#endif
3515 return pSReg;
3516}
3517
3518
3519/**
3520 * Gets a reference (pointer) to the specified segment register (the selector
3521 * value).
3522 *
3523 * @returns Pointer to the selector variable.
3524 * @param pIemCpu The per CPU data.
3525 * @param iSegReg The segment register.
3526 */
3527static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
3528{
3529 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3530 switch (iSegReg)
3531 {
3532 case X86_SREG_ES: return &pCtx->es.Sel;
3533 case X86_SREG_CS: return &pCtx->cs.Sel;
3534 case X86_SREG_SS: return &pCtx->ss.Sel;
3535 case X86_SREG_DS: return &pCtx->ds.Sel;
3536 case X86_SREG_FS: return &pCtx->fs.Sel;
3537 case X86_SREG_GS: return &pCtx->gs.Sel;
3538 }
3539 AssertFailedReturn(NULL);
3540}
3541
3542
3543/**
3544 * Fetches the selector value of a segment register.
3545 *
3546 * @returns The selector value.
3547 * @param pIemCpu The per CPU data.
3548 * @param iSegReg The segment register.
3549 */
3550static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
3551{
3552 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3553 switch (iSegReg)
3554 {
3555 case X86_SREG_ES: return pCtx->es.Sel;
3556 case X86_SREG_CS: return pCtx->cs.Sel;
3557 case X86_SREG_SS: return pCtx->ss.Sel;
3558 case X86_SREG_DS: return pCtx->ds.Sel;
3559 case X86_SREG_FS: return pCtx->fs.Sel;
3560 case X86_SREG_GS: return pCtx->gs.Sel;
3561 }
3562 AssertFailedReturn(0xffff);
3563}
3564
3565
3566/**
3567 * Gets a reference (pointer) to the specified general register.
3568 *
3569 * @returns Register reference.
3570 * @param pIemCpu The per CPU data.
3571 * @param iReg The general register.
3572 */
3573static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
3574{
3575 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3576 switch (iReg)
3577 {
3578 case X86_GREG_xAX: return &pCtx->rax;
3579 case X86_GREG_xCX: return &pCtx->rcx;
3580 case X86_GREG_xDX: return &pCtx->rdx;
3581 case X86_GREG_xBX: return &pCtx->rbx;
3582 case X86_GREG_xSP: return &pCtx->rsp;
3583 case X86_GREG_xBP: return &pCtx->rbp;
3584 case X86_GREG_xSI: return &pCtx->rsi;
3585 case X86_GREG_xDI: return &pCtx->rdi;
3586 case X86_GREG_x8: return &pCtx->r8;
3587 case X86_GREG_x9: return &pCtx->r9;
3588 case X86_GREG_x10: return &pCtx->r10;
3589 case X86_GREG_x11: return &pCtx->r11;
3590 case X86_GREG_x12: return &pCtx->r12;
3591 case X86_GREG_x13: return &pCtx->r13;
3592 case X86_GREG_x14: return &pCtx->r14;
3593 case X86_GREG_x15: return &pCtx->r15;
3594 }
3595 AssertFailedReturn(NULL);
3596}
3597
3598
3599/**
3600 * Gets a reference (pointer) to the specified 8-bit general register.
3601 *
3602 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
3603 *
3604 * @returns Register reference.
3605 * @param pIemCpu The per CPU data.
3606 * @param iReg The register.
3607 */
3608static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
3609{
3610 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
3611 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
3612
3613 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
3614 if (iReg >= 4)
3615 pu8Reg++;
3616 return pu8Reg;
3617}
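/* Editor's note: a small sketch (assuming no REX prefix is active) of the
 * high-byte register mapping done above; index 4 is AH, which lives in the
 * second byte of RAX. */
#if 0 /* illustration only */
    uint8_t *pbAh = iemGRegRefU8(pIemCpu, 4 /* AH */);
    Assert(pbAh == (uint8_t *)&pIemCpu->CTX_SUFF(pCtx)->rax + 1);
#endif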
3618
3619
3620/**
3621 * Fetches the value of an 8-bit general register.
3622 *
3623 * @returns The register value.
3624 * @param pIemCpu The per CPU data.
3625 * @param iReg The register.
3626 */
3627static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
3628{
3629 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
3630 return *pbSrc;
3631}
3632
3633
3634/**
3635 * Fetches the value of a 16-bit general register.
3636 *
3637 * @returns The register value.
3638 * @param pIemCpu The per CPU data.
3639 * @param iReg The register.
3640 */
3641static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
3642{
3643 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
3644}
3645
3646
3647/**
3648 * Fetches the value of a 32-bit general register.
3649 *
3650 * @returns The register value.
3651 * @param pIemCpu The per CPU data.
3652 * @param iReg The register.
3653 */
3654static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
3655{
3656 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
3657}
3658
3659
3660/**
3661 * Fetches the value of a 64-bit general register.
3662 *
3663 * @returns The register value.
3664 * @param pIemCpu The per CPU data.
3665 * @param iReg The register.
3666 */
3667static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
3668{
3669 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
3670}
3671
3672
3673/**
3674 * Is the FPU state in FXSAVE format or not.
3675 *
3676 * @returns true if it is, false if it's in FNSAVE.
3677 * @param pIemCpu The IEM per CPU data.
3678 */
3679DECLINLINE(bool) iemFRegIsFxSaveFormat(PIEMCPU pIemCpu)
3680{
3681#ifdef RT_ARCH_AMD64
3682 NOREF(pIemCpu);
3683 return true;
3684#else
3685 NOREF(pIemCpu); /// @todo return pVCpu->pVMR3->cpum.s.CPUFeatures.edx.u1FXSR;
3686 return true;
3687#endif
3688}
3689
3690
3691/**
3692 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
3693 *
3694 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3695 * segment limit.
3696 *
3697 * @param pIemCpu The per CPU data.
3698 * @param offNextInstr The offset of the next instruction.
3699 */
3700static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
3701{
3702 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3703 switch (pIemCpu->enmEffOpSize)
3704 {
3705 case IEMMODE_16BIT:
3706 {
3707 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3708 if ( uNewIp > pCtx->cs.u32Limit
3709 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3710 return iemRaiseGeneralProtectionFault0(pIemCpu);
3711 pCtx->rip = uNewIp;
3712 break;
3713 }
3714
3715 case IEMMODE_32BIT:
3716 {
3717 Assert(pCtx->rip <= UINT32_MAX);
3718 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3719
3720 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3721 if (uNewEip > pCtx->cs.u32Limit)
3722 return iemRaiseGeneralProtectionFault0(pIemCpu);
3723 pCtx->rip = uNewEip;
3724 break;
3725 }
3726
3727 case IEMMODE_64BIT:
3728 {
3729 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3730
3731 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3732 if (!IEM_IS_CANONICAL(uNewRip))
3733 return iemRaiseGeneralProtectionFault0(pIemCpu);
3734 pCtx->rip = uNewRip;
3735 break;
3736 }
3737
3738 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3739 }
3740
3741 pCtx->eflags.Bits.u1RF = 0;
3742 return VINF_SUCCESS;
3743}
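/* Editor's note (worked example with hypothetical values): for a two byte
 * "JMP short" at IP=0x1000 with an 8-bit displacement of 0x10, offOpcode is 2
 * (the bytes decoded so far), so the computation above yields
 * 0x1000 + 0x10 + 2 = 0x1012, i.e. the displacement is relative to the first
 * byte of the following instruction. */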
3744
3745
3746/**
3747 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
3748 *
3749 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3750 * segment limit.
3751 *
3752 * @returns Strict VBox status code.
3753 * @param pIemCpu The per CPU data.
3754 * @param offNextInstr The offset of the next instruction.
3755 */
3756static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
3757{
3758 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3759 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
3760
3761 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
3762 if ( uNewIp > pCtx->cs.u32Limit
3763 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3764 return iemRaiseGeneralProtectionFault0(pIemCpu);
3765 /** @todo Test 16-bit jump in 64-bit mode. possible? */
3766 pCtx->rip = uNewIp;
3767 pCtx->eflags.Bits.u1RF = 0;
3768
3769 return VINF_SUCCESS;
3770}
3771
3772
3773/**
3774 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
3775 *
3776 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3777 * segment limit.
3778 *
3779 * @returns Strict VBox status code.
3780 * @param pIemCpu The per CPU data.
3781 * @param offNextInstr The offset of the next instruction.
3782 */
3783static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
3784{
3785 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3786 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
3787
3788 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
3789 {
3790 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3791
3792 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
3793 if (uNewEip > pCtx->cs.u32Limit)
3794 return iemRaiseGeneralProtectionFault0(pIemCpu);
3795 pCtx->rip = uNewEip;
3796 }
3797 else
3798 {
3799 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3800
3801 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
3802 if (!IEM_IS_CANONICAL(uNewRip))
3803 return iemRaiseGeneralProtectionFault0(pIemCpu);
3804 pCtx->rip = uNewRip;
3805 }
3806 pCtx->eflags.Bits.u1RF = 0;
3807 return VINF_SUCCESS;
3808}
3809
3810
3811/**
3812 * Performs a near jump to the specified address.
3813 *
3814 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3815 * segment limit.
3816 *
3817 * @param pIemCpu The per CPU data.
3818 * @param uNewRip The new RIP value.
3819 */
3820static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
3821{
3822 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3823 switch (pIemCpu->enmEffOpSize)
3824 {
3825 case IEMMODE_16BIT:
3826 {
3827 Assert(uNewRip <= UINT16_MAX);
3828 if ( uNewRip > pCtx->cs.u32Limit
3829 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
3830 return iemRaiseGeneralProtectionFault0(pIemCpu);
3831 /** @todo Test 16-bit jump in 64-bit mode. */
3832 pCtx->rip = uNewRip;
3833 break;
3834 }
3835
3836 case IEMMODE_32BIT:
3837 {
3838 Assert(uNewRip <= UINT32_MAX);
3839 Assert(pCtx->rip <= UINT32_MAX);
3840 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
3841
3842 if (uNewRip > pCtx->cs.u32Limit)
3843 return iemRaiseGeneralProtectionFault0(pIemCpu);
3844 pCtx->rip = uNewRip;
3845 break;
3846 }
3847
3848 case IEMMODE_64BIT:
3849 {
3850 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
3851
3852 if (!IEM_IS_CANONICAL(uNewRip))
3853 return iemRaiseGeneralProtectionFault0(pIemCpu);
3854 pCtx->rip = uNewRip;
3855 break;
3856 }
3857
3858 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3859 }
3860
3861 pCtx->eflags.Bits.u1RF = 0;
3862 return VINF_SUCCESS;
3863}
3864
3865
3866/**
3867 * Gets the address of the top of the stack.
3868 *
3869 * @param pIemCpu The per CPU data.
3870 * @param pCtx The CPU context which SP/ESP/RSP should be
3871 * read.
3872 */
3873DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
3874{
3875 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3876 return pCtx->rsp;
3877 if (pCtx->ss.Attr.n.u1DefBig)
3878 return pCtx->esp;
3879 return pCtx->sp;
3880}
3881
3882
3883/**
3884 * Updates the RIP/EIP/IP to point to the next instruction.
3885 *
3886 * This function leaves the EFLAGS.RF flag alone.
3887 *
3888 * @param pIemCpu The per CPU data.
3889 * @param cbInstr The number of bytes to add.
3890 */
3891static void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
3892{
3893 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3894 switch (pIemCpu->enmCpuMode)
3895 {
3896 case IEMMODE_16BIT:
3897 Assert(pCtx->rip <= UINT16_MAX);
3898 pCtx->eip += cbInstr;
3899 pCtx->eip &= UINT32_C(0xffff);
3900 break;
3901
3902 case IEMMODE_32BIT:
3903 pCtx->eip += cbInstr;
3904 Assert(pCtx->rip <= UINT32_MAX);
3905 break;
3906
3907 case IEMMODE_64BIT:
3908 pCtx->rip += cbInstr;
3909 break;
3910 default: AssertFailed();
3911 }
3912}
3913
3914
3915#if 0
3916/**
3917 * Updates the RIP/EIP/IP to point to the next instruction.
3918 *
3919 * @param pIemCpu The per CPU data.
3920 */
3921static void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
3922{
3923 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
3924}
3925#endif
3926
3927
3928
3929/**
3930 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
3931 *
3932 * @param pIemCpu The per CPU data.
3933 * @param cbInstr The number of bytes to add.
3934 */
3935static void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
3936{
3937 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3938
3939 pCtx->eflags.Bits.u1RF = 0;
3940
3941 switch (pIemCpu->enmCpuMode)
3942 {
3943 case IEMMODE_16BIT:
3944 Assert(pCtx->rip <= UINT16_MAX);
3945 pCtx->eip += cbInstr;
3946 pCtx->eip &= UINT32_C(0xffff);
3947 break;
3948
3949 case IEMMODE_32BIT:
3950 pCtx->eip += cbInstr;
3951 Assert(pCtx->rip <= UINT32_MAX);
3952 break;
3953
3954 case IEMMODE_64BIT:
3955 pCtx->rip += cbInstr;
3956 break;
3957 default: AssertFailed();
3958 }
3959}
3960
3961
3962/**
3963 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
3964 *
3965 * @param pIemCpu The per CPU data.
3966 */
3967static void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
3968{
3969 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
3970}
3971
3972
3973/**
3974 * Adds to the stack pointer.
3975 *
3976 * @param pIemCpu The per CPU data.
3977 * @param pCtx The CPU context which SP/ESP/RSP should be
3978 * updated.
3979 * @param cbToAdd The number of bytes to add.
3980 */
3981DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
3982{
3983 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3984 pCtx->rsp += cbToAdd;
3985 else if (pCtx->ss.Attr.n.u1DefBig)
3986 pCtx->esp += cbToAdd;
3987 else
3988 pCtx->sp += cbToAdd;
3989}
3990
3991
3992/**
3993 * Subtracts from the stack pointer.
3994 *
3995 * @param pIemCpu The per CPU data.
3996 * @param pCtx The CPU context which SP/ESP/RSP should be
3997 * updated.
3998 * @param cbToSub The number of bytes to subtract.
3999 */
4000DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4001{
4002 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4003 pCtx->rsp -= cbToSub;
4004 else if (pCtx->ss.Attr.n.u1DefBig)
4005 pCtx->esp -= cbToSub;
4006 else
4007 pCtx->sp -= cbToSub;
4008}
4009
4010
4011/**
4012 * Adds to the temporary stack pointer.
4013 *
4014 * @param pIemCpu The per CPU data.
4015 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4016 * @param cbToAdd The number of bytes to add.
4017 * @param pCtx Where to get the current stack mode.
4018 */
4019DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4020{
4021 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4022 pTmpRsp->u += cbToAdd;
4023 else if (pCtx->ss.Attr.n.u1DefBig)
4024 pTmpRsp->DWords.dw0 += cbToAdd;
4025 else
4026 pTmpRsp->Words.w0 += cbToAdd;
4027}
4028
4029
4030/**
4031 * Subtracts from the temporary stack pointer.
4032 *
4033 * @param pIemCpu The per CPU data.
4034 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4035 * @param cbToSub The number of bytes to subtract.
4036 * @param pCtx Where to get the current stack mode.
4037 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
4038 * expecting that.
4039 */
4040DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
4041{
4042 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4043 pTmpRsp->u -= cbToSub;
4044 else if (pCtx->ss.Attr.n.u1DefBig)
4045 pTmpRsp->DWords.dw0 -= cbToSub;
4046 else
4047 pTmpRsp->Words.w0 -= cbToSub;
4048}
4049
4050
4051/**
4052 * Calculates the effective stack address for a push of the specified size as
4053 * well as the new RSP value (upper bits may be masked).
4054 *
4055 * @returns Effective stack address for the push.
4056 * @param pIemCpu The IEM per CPU data.
4057 * @param pCtx Where to get the current stack mode.
4058 * @param cbItem The size of the stack item to push.
4059 * @param puNewRsp Where to return the new RSP value.
4060 */
4061DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
4062{
4063 RTUINT64U uTmpRsp;
4064 RTGCPTR GCPtrTop;
4065 uTmpRsp.u = pCtx->rsp;
4066
4067 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4068 GCPtrTop = uTmpRsp.u -= cbItem;
4069 else if (pCtx->ss.Attr.n.u1DefBig)
4070 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
4071 else
4072 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
4073 *puNewRsp = uTmpRsp.u;
4074 return GCPtrTop;
4075}
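/* Editor's note: a sketch (hypothetical register values) of the 16-bit case
 * above; only SP participates in the subtraction, so it wraps while the upper
 * bits of RSP are preserved in the returned new RSP value. */
#if 0 /* illustration only, SS.D=0 and not 64-bit mode */
    uint64_t uNewRsp;
    pCtx->rsp = UINT64_C(0x12340002);
    RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
    Assert(GCPtrTop == 0xfffe);               /* SP wrapped from 0x0002 to 0xfffe. */
    Assert(uNewRsp == UINT64_C(0x1234fffe));  /* upper 48 bits left untouched.     */
#endif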
4076
4077
4078/**
4079 * Gets the current stack pointer and calculates the value after a pop of the
4080 * specified size.
4081 *
4082 * @returns Current stack pointer.
4083 * @param pIemCpu The per CPU data.
4084 * @param pCtx Where to get the current stack mode.
4085 * @param cbItem The size of the stack item to pop.
4086 * @param puNewRsp Where to return the new RSP value.
4087 */
4088DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
4089{
4090 RTUINT64U uTmpRsp;
4091 RTGCPTR GCPtrTop;
4092 uTmpRsp.u = pCtx->rsp;
4093
4094 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4095 {
4096 GCPtrTop = uTmpRsp.u;
4097 uTmpRsp.u += cbItem;
4098 }
4099 else if (pCtx->ss.Attr.n.u1DefBig)
4100 {
4101 GCPtrTop = uTmpRsp.DWords.dw0;
4102 uTmpRsp.DWords.dw0 += cbItem;
4103 }
4104 else
4105 {
4106 GCPtrTop = uTmpRsp.Words.w0;
4107 uTmpRsp.Words.w0 += cbItem;
4108 }
4109 *puNewRsp = uTmpRsp.u;
4110 return GCPtrTop;
4111}
4112
4113
4114/**
4115 * Calculates the effective stack address for a push of the specified size as
4116 * well as the new temporary RSP value (upper bits may be masked).
4117 *
4118 * @returns Effective stack address for the push.
4119 * @param pIemCpu The per CPU data.
4120 * @param pCtx Where to get the current stack mode.
4121 * @param pTmpRsp The temporary stack pointer. This is updated.
4122 * @param cbItem The size of the stack item to push.
4123 */
4124DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
4125{
4126 RTGCPTR GCPtrTop;
4127
4128 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4129 GCPtrTop = pTmpRsp->u -= cbItem;
4130 else if (pCtx->ss.Attr.n.u1DefBig)
4131 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
4132 else
4133 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
4134 return GCPtrTop;
4135}
4136
4137
4138/**
4139 * Gets the effective stack address for a pop of the specified size and
4140 * calculates and updates the temporary RSP.
4141 *
4142 * @returns Current stack pointer.
4143 * @param pIemCpu The per CPU data.
4144 * @param pTmpRsp The temporary stack pointer. This is updated.
4145 * @param pCtx Where to get the current stack mode.
4146 * @param cbItem The size of the stack item to pop.
4147 */
4148DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
4149{
4150 RTGCPTR GCPtrTop;
4151 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4152 {
4153 GCPtrTop = pTmpRsp->u;
4154 pTmpRsp->u += cbItem;
4155 }
4156 else if (pCtx->ss.Attr.n.u1DefBig)
4157 {
4158 GCPtrTop = pTmpRsp->DWords.dw0;
4159 pTmpRsp->DWords.dw0 += cbItem;
4160 }
4161 else
4162 {
4163 GCPtrTop = pTmpRsp->Words.w0;
4164 pTmpRsp->Words.w0 += cbItem;
4165 }
4166 return GCPtrTop;
4167}
4168
4169
4170/**
4171 * Checks if an Intel CPUID feature bit is set.
4172 *
4173 * @returns true / false.
4174 *
4175 * @param pIemCpu The IEM per CPU data.
4176 * @param fEdx The EDX bit to test, or 0 if ECX.
4177 * @param fEcx The ECX bit to test, or 0 if EDX.
4178 * @remarks Used via IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX,
4179 * IEM_IS_INTEL_CPUID_FEATURE_PRESENT_ECX and others.
4180 */
4181static bool iemRegIsIntelCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
4182{
4183 uint32_t uEax, uEbx, uEcx, uEdx;
4184 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x00000001, &uEax, &uEbx, &uEcx, &uEdx);
4185 return (fEcx && (uEcx & fEcx))
4186 || (fEdx && (uEdx & fEdx));
4187}
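/* Editor's note: a sketch of typical use via the wrapper macro named in the
 * remarks above (the SSE2 constant name is assumed to come from x86.h);
 * opcode decoders would reject the instruction when the feature bit is
 * clear. */
#if 0 /* illustration only */
    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
        return IEMOP_RAISE_INVALID_OPCODE();
#endif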
4188
4189
4190/**
4191 * Checks if an AMD CPUID feature bit is set.
4192 *
4193 * @returns true / false.
4194 *
4195 * @param pIemCpu The IEM per CPU data.
4196 * @param fEdx The EDX bit to test, or 0 if ECX.
4197 * @param fEcx The ECX bit to test, or 0 if EDX.
4198 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX,
4199 * IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX and others.
4200 */
4201static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)
4202{
4203 uint32_t uEax, uEbx, uEcx, uEdx;
4204 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);
4205 return (fEcx && (uEcx & fEcx))
4206 || (fEdx && (uEdx & fEdx));
4207}
4208
4209/** @} */
4210
4211
4212/** @name FPU access and helpers.
4213 *
4214 * @{
4215 */
4216
4217
4218/**
4219 * Hook for preparing to use the host FPU.
4220 *
4221 * This is necessary in ring-0 and raw-mode context.
4222 *
4223 * @param pIemCpu The IEM per CPU data.
4224 */
4225DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
4226{
4227#ifdef IN_RING3
4228 NOREF(pIemCpu);
4229#else
4230/** @todo RZ: FIXME */
4231//# error "Implement me"
4232#endif
4233}
4234
4235
4236/**
4237 * Hook for preparing to use the host FPU for SSE.
4238 *
4239 * This is necessary in ring-0 and raw-mode context.
4240 *
4241 * @param pIemCpu The IEM per CPU data.
4242 */
4243DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
4244{
4245 iemFpuPrepareUsage(pIemCpu);
4246}
4247
4248
4249/**
4250 * Stores a QNaN value into a FPU register.
4251 *
4252 * @param pReg Pointer to the register.
4253 */
4254DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
4255{
4256 pReg->au32[0] = UINT32_C(0x00000000);
4257 pReg->au32[1] = UINT32_C(0xc0000000);
4258 pReg->au16[4] = UINT16_C(0xffff);
4259}
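/* Editor's note: the three stores above assemble the x87 "real indefinite"
 * QNaN, i.e. sign=1, exponent=0x7fff, mantissa=0xC000000000000000, giving the
 * 80-bit pattern FFFF C000 0000 0000 0000 that the FPU itself produces as the
 * masked response to an invalid operation. */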
4260
4261
4262/**
4263 * Updates the FOP, FPU.CS and FPUIP registers.
4264 *
4265 * @param pIemCpu The IEM per CPU data.
4266 * @param pCtx The CPU context.
4267 */
4268DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4269{
4270 pCtx->fpu.FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
4271 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
4272 /** @todo FPU.CS and FPUIP need to be kept separately. */
4273 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4274 {
4275 /** @todo Testcase: we're making assumptions here about how FPUIP and FPUDP
4276 * are handled in real mode, based on the fnsave and fnstenv images. */
4277 pCtx->fpu.CS = 0;
4278 pCtx->fpu.FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
4279 }
4280 else
4281 {
4282 pCtx->fpu.CS = pCtx->cs.Sel;
4283 pCtx->fpu.FPUIP = pCtx->rip;
4284 }
4285}
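/* Editor's note (worked example, hypothetical opcode bytes): FOP is the
 * classic 11-bit x87 opcode, i.e. the low three bits of the ESC byte plus the
 * ModR/M byte. For FADD ST(1),ST(0), encoded as DC C1, offFpuOpcode points at
 * the ModR/M byte, so FOP = 0xC1 | ((0xDC & 7) << 8) = 0x4C1. */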
4286
4287
4288/**
4289 * Updates the FPU.DS and FPUDP registers.
4290 *
4291 * @param pIemCpu The IEM per CPU data.
4292 * @param pCtx The CPU context.
4293 * @param iEffSeg The effective segment register.
4294 * @param GCPtrEff The effective address relative to @a iEffSeg.
4295 */
4296DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4297{
4298 RTSEL sel;
4299 switch (iEffSeg)
4300 {
4301 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
4302 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
4303 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
4304 case X86_SREG_ES: sel = pCtx->es.Sel; break;
4305 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
4306 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
4307 default:
4308 AssertMsgFailed(("%d\n", iEffSeg));
4309 sel = pCtx->ds.Sel;
4310 }
4311 /** @todo FPU.DS and FPUDP need to be kept separately. */
4312 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4313 {
4314 pCtx->fpu.DS = 0;
4315 pCtx->fpu.FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
4316 }
4317 else
4318 {
4319 pCtx->fpu.DS = sel;
4320 pCtx->fpu.FPUDP = GCPtrEff;
4321 }
4322}
4323
4324
4325/**
4326 * Rotates the stack registers in the push direction.
4327 *
4328 * @param pCtx The CPU context.
4329 * @remarks This is a complete waste of time, but fxsave stores the registers in
4330 * stack order.
4331 */
4332DECLINLINE(void) iemFpuRotateStackPush(PCPUMCTX pCtx)
4333{
4334 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[7].r80;
4335 pCtx->fpu.aRegs[7].r80 = pCtx->fpu.aRegs[6].r80;
4336 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[5].r80;
4337 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[4].r80;
4338 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[3].r80;
4339 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[2].r80;
4340 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[1].r80;
4341 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[0].r80;
4342 pCtx->fpu.aRegs[0].r80 = r80Tmp;
4343}
4344
4345
4346/**
4347 * Rotates the stack registers in the pop direction.
4348 *
4349 * @param pCtx The CPU context.
4350 * @remarks This is a complete waste of time, but fxsave stores the registers in
4351 * stack order.
4352 */
4353DECLINLINE(void) iemFpuRotateStackPop(PCPUMCTX pCtx)
4354{
4355 RTFLOAT80U r80Tmp = pCtx->fpu.aRegs[0].r80;
4356 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[1].r80;
4357 pCtx->fpu.aRegs[1].r80 = pCtx->fpu.aRegs[2].r80;
4358 pCtx->fpu.aRegs[2].r80 = pCtx->fpu.aRegs[3].r80;
4359 pCtx->fpu.aRegs[3].r80 = pCtx->fpu.aRegs[4].r80;
4360 pCtx->fpu.aRegs[4].r80 = pCtx->fpu.aRegs[5].r80;
4361 pCtx->fpu.aRegs[5].r80 = pCtx->fpu.aRegs[6].r80;
4362 pCtx->fpu.aRegs[6].r80 = pCtx->fpu.aRegs[7].r80;
4363 pCtx->fpu.aRegs[7].r80 = r80Tmp;
4364}
4365
4366
4367/**
4368 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4369 * exception prevents it.
4370 *
4371 * @param pIemCpu The IEM per CPU data.
4372 * @param pResult The FPU operation result to push.
4373 * @param pCtx The CPU context.
4374 */
4375static void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx)
4376{
4377 /* Update FSW and bail if there are pending exceptions afterwards. */
4378 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4379 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4380 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4381 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4382 {
4383 pCtx->fpu.FSW = fFsw;
4384 return;
4385 }
4386
4387 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4388 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4389 {
4390 /* All is fine, push the actual value. */
4391 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4392 pCtx->fpu.aRegs[7].r80 = pResult->r80Result;
4393 }
4394 else if (pCtx->fpu.FCW & X86_FCW_IM)
4395 {
4396 /* Masked stack overflow, push QNaN. */
4397 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4398 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4399 }
4400 else
4401 {
4402 /* Raise stack overflow, don't push anything. */
4403 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4404 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4405 return;
4406 }
4407
4408 fFsw &= ~X86_FSW_TOP_MASK;
4409 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4410 pCtx->fpu.FSW = fFsw;
4411
4412 iemFpuRotateStackPush(pCtx);
4413}
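/* Editor's note: the "(TOP + 7) & X86_FSW_TOP_SMASK" above is simply TOP - 1
 * modulo 8 on the 3-bit TOP field, i.e. a push moves TOP one slot down
 * (TOP=0 gives iNewTop=7, TOP=3 gives 2); the popping helpers add 1 (or 9,
 * which is the same modulo 8) to move it back up. */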
4414
4415
4416/**
4417 * Stores a result in a FPU register and updates the FSW and FTW.
4418 *
4419 * @param pIemCpu The IEM per CPU data.
4420 * @param pResult The result to store.
4421 * @param iStReg Which FPU register to store it in.
4422 * @param pCtx The CPU context.
4423 */
4424static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
4425{
4426 Assert(iStReg < 8);
4427 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4428 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4429 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4430 pCtx->fpu.FTW |= RT_BIT(iReg);
4431 pCtx->fpu.aRegs[iStReg].r80 = pResult->r80Result;
4432}
4433
4434
4435/**
4436 * Only updates the FPU status word (FSW) with the result of the current
4437 * instruction.
4438 *
4439 * @param pCtx The CPU context.
4440 * @param u16FSW The FSW output of the current instruction.
4441 */
4442static void iemFpuUpdateFSWOnly(PCPUMCTX pCtx, uint16_t u16FSW)
4443{
4444 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4445 pCtx->fpu.FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4446}
4447
4448
4449/**
4450 * Pops one item off the FPU stack if no pending exception prevents it.
4451 *
4452 * @param pCtx The CPU context.
4453 */
4454static void iemFpuMaybePopOne(PCPUMCTX pCtx)
4455{
4456 /* Check pending exceptions. */
4457 uint16_t uFSW = pCtx->fpu.FSW;
4458 if ( (pCtx->fpu.FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4459 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4460 return;
4461
4462 /* TOP--. */
4463 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4464 uFSW &= ~X86_FSW_TOP_MASK;
4465 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4466 pCtx->fpu.FSW = uFSW;
4467
4468 /* Mark the previous ST0 as empty. */
4469 iOldTop >>= X86_FSW_TOP_SHIFT;
4470 pCtx->fpu.FTW &= ~RT_BIT(iOldTop);
4471
4472 /* Rotate the registers. */
4473 iemFpuRotateStackPop(pCtx);
4474}
4475
4476
4477/**
4478 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4479 *
4480 * @param pIemCpu The IEM per CPU data.
4481 * @param pResult The FPU operation result to push.
4482 */
4483static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
4484{
4485 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4486 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4487 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4488}
4489
4490
4491/**
4492 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4493 * and sets FPUDP and FPUDS.
4494 *
4495 * @param pIemCpu The IEM per CPU data.
4496 * @param pResult The FPU operation result to push.
4497 * @param iEffSeg The effective segment register.
4498 * @param GCPtrEff The effective address relative to @a iEffSeg.
4499 */
4500static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4501{
4502 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4503 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4504 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4505 iemFpuMaybePushResult(pIemCpu, pResult, pCtx);
4506}
4507
4508
4509/**
4510 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4511 * unless a pending exception prevents it.
4512 *
4513 * @param pIemCpu The IEM per CPU data.
4514 * @param pResult The FPU operation result to store and push.
4515 */
4516static void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
4517{
4518 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4519 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4520
4521 /* Update FSW and bail if there are pending exceptions afterwards. */
4522 uint16_t fFsw = pCtx->fpu.FSW & ~X86_FSW_C_MASK;
4523 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4524 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4525 & ~(pCtx->fpu.FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4526 {
4527 pCtx->fpu.FSW = fFsw;
4528 return;
4529 }
4530
4531 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4532 if (!(pCtx->fpu.FTW & RT_BIT(iNewTop)))
4533 {
4534 /* All is fine, push the actual value. */
4535 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4536 pCtx->fpu.aRegs[0].r80 = pResult->r80Result1;
4537 pCtx->fpu.aRegs[7].r80 = pResult->r80Result2;
4538 }
4539 else if (pCtx->fpu.FCW & X86_FCW_IM)
4540 {
4541 /* Masked stack overflow, push QNaN. */
4542 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4543 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4544 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4545 }
4546 else
4547 {
4548 /* Raise stack overflow, don't push anything. */
4549 pCtx->fpu.FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4550 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4551 return;
4552 }
4553
4554 fFsw &= ~X86_FSW_TOP_MASK;
4555 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4556 pCtx->fpu.FSW = fFsw;
4557
4558 iemFpuRotateStackPush(pCtx);
4559}
4560
4561
4562/**
4563 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4564 * FOP.
4565 *
4566 * @param pIemCpu The IEM per CPU data.
4567 * @param pResult The result to store.
4568 * @param iStReg Which FPU register to store it in.
4570 */
4571static void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4572{
4573 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4574 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4575 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4576}
4577
4578
4579/**
4580 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4581 * FOP, and then pops the stack.
4582 *
4583 * @param pIemCpu The IEM per CPU data.
4584 * @param pResult The result to store.
4585 * @param iStReg Which FPU register to store it in.
4587 */
4588static void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
4589{
4590 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4591 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4592 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4593 iemFpuMaybePopOne(pCtx);
4594}
4595
4596
4597/**
4598 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4599 * FPUDP, and FPUDS.
4600 *
4601 * @param pIemCpu The IEM per CPU data.
4602 * @param pResult The result to store.
4603 * @param iStReg Which FPU register to store it in.
4605 * @param iEffSeg The effective memory operand selector register.
4606 * @param GCPtrEff The effective memory operand offset.
4607 */
4608static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4609{
4610 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4611 iemFpuUpdateDP(pIemCpu, pIemCpu->CTX_SUFF(pCtx), iEffSeg, GCPtrEff);
4612 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4613 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4614}
4615
4616
4617/**
4618 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4619 * FPUDP, and FPUDS, and then pops the stack.
4620 *
4621 * @param pIemCpu The IEM per CPU data.
4622 * @param pResult The result to store.
4623 * @param iStReg Which FPU register to store it in.
4625 * @param iEffSeg The effective memory operand selector register.
4626 * @param GCPtrEff The effective memory operand offset.
4627 */
4628static void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
4629 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4630{
4631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4632 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4633 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4634 iemFpuStoreResultOnly(pIemCpu, pResult, iStReg, pCtx);
4635 iemFpuMaybePopOne(pCtx);
4636}
4637
4638
4639/**
4640 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4641 *
4642 * @param pIemCpu The IEM per CPU data.
4643 */
4644static void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
4645{
4646 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pIemCpu->CTX_SUFF(pCtx));
4647}
4648
4649
4650/**
4651 * Marks the specified stack register as free (for FFREE).
4652 *
4653 * @param pIemCpu The IEM per CPU data.
4654 * @param iStReg The register to free.
4655 */
4656static void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
4657{
4658 Assert(iStReg < 8);
4659 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4660 uint8_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4661 pCtx->fpu.FTW &= ~RT_BIT(iReg);
4662}
4663
4664
4665/**
4666 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4667 *
4668 * @param pIemCpu The IEM per CPU data.
4669 */
4670static void iemFpuStackIncTop(PIEMCPU pIemCpu)
4671{
4672 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4673 uint16_t uFsw = pCtx->fpu.FSW;
4674 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4675 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4676 uFsw &= ~X86_FSW_TOP_MASK;
4677 uFsw |= uTop;
4678 pCtx->fpu.FSW = uFsw;
4679}
4680
4681
4682/**
4683 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
4684 *
4685 * @param pIemCpu The IEM per CPU data.
4686 */
4687static void iemFpuStackDecTop(PIEMCPU pIemCpu)
4688{
4689 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4690 uint16_t uFsw = pCtx->fpu.FSW;
4691 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4692 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4693 uFsw &= ~X86_FSW_TOP_MASK;
4694 uFsw |= uTop;
4695 pCtx->fpu.FSW = uFsw;
4696}
4697
4698
4699/**
4700 * Updates the FSW, FOP, FPUIP, and FPUCS.
4701 *
4702 * @param pIemCpu The IEM per CPU data.
4703 * @param u16FSW The FSW from the current instruction.
4704 */
4705static void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
4706{
4707 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4708 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4709 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4710}
4711
4712
4713/**
4714 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4715 *
4716 * @param pIemCpu The IEM per CPU data.
4717 * @param u16FSW The FSW from the current instruction.
4718 */
4719static void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4720{
4721 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4722 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4723 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4724 iemFpuMaybePopOne(pCtx);
4725}
4726
4727
4728/**
4729 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4730 *
4731 * @param pIemCpu The IEM per CPU data.
4732 * @param u16FSW The FSW from the current instruction.
4733 * @param iEffSeg The effective memory operand selector register.
4734 * @param GCPtrEff The effective memory operand offset.
4735 */
4736static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4737{
4738 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4739 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4740 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4741 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4742}
4743
4744
4745/**
4746 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4747 *
4748 * @param pIemCpu The IEM per CPU data.
4749 * @param u16FSW The FSW from the current instruction.
4750 */
4751static void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
4752{
4753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4754 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4755 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4756 iemFpuMaybePopOne(pCtx);
4757 iemFpuMaybePopOne(pCtx);
4758}
4759
4760
4761/**
4762 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4763 *
4764 * @param pIemCpu The IEM per CPU data.
4765 * @param u16FSW The FSW from the current instruction.
4766 * @param iEffSeg The effective memory operand selector register.
4767 * @param GCPtrEff The effective memory operand offset.
4768 */
4769static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4770{
4771 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4772 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4773 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4774 iemFpuUpdateFSWOnly(pCtx, u16FSW);
4775 iemFpuMaybePopOne(pCtx);
4776}
4777
4778
4779/**
4780 * Worker routine for raising an FPU stack underflow exception.
4781 *
4782 * @param pIemCpu The IEM per CPU data.
4783 * @param iStReg The stack register being accessed.
4784 * @param pCtx The CPU context.
4785 */
4786static void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, uint8_t iStReg, PCPUMCTX pCtx)
4787{
4788 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4789 if (pCtx->fpu.FCW & X86_FCW_IM)
4790 {
4791 /* Masked underflow. */
4792 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4793 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4794 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4795 if (iStReg != UINT8_MAX)
4796 {
4797 pCtx->fpu.FTW |= RT_BIT(iReg);
4798 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4799 }
4800 }
4801 else
4802 {
4803 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4804 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4805 }
4806}
4807
4808
4809/**
4810 * Raises a FPU stack underflow exception.
4811 *
4812 * @param pIemCpu The IEM per CPU data.
4813 * @param iStReg The destination register that should be loaded
4814 * with QNaN if \#IS is masked. Specify
4815 * UINT8_MAX if none (like for fcom).
4816 */
4817DECL_NO_INLINE(static, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
4818{
4819 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4820 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4821 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4822}
4823
4824
4825DECL_NO_INLINE(static, void)
4826iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4827{
4828 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4829 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4830 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4831 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4832}
4833
4834
4835DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
4836{
4837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4838 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4839 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4840 iemFpuMaybePopOne(pCtx);
4841}
4842
4843
4844DECL_NO_INLINE(static, void)
4845iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4846{
4847 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4848 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4849 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4850 iemFpuStackUnderflowOnly(pIemCpu, iStReg, pCtx);
4851 iemFpuMaybePopOne(pCtx);
4852}
4853
4854
4855DECL_NO_INLINE(static, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
4856{
4857 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4858 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4859 iemFpuStackUnderflowOnly(pIemCpu, UINT8_MAX, pCtx);
4860 iemFpuMaybePopOne(pCtx);
4861 iemFpuMaybePopOne(pCtx);
4862}
4863
4864
4865DECL_NO_INLINE(static, void)
4866iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
4867{
4868 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4869 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4870
4871 if (pCtx->fpu.FCW & X86_FCW_IM)
4872 {
4873 /* Masked underflow - Push QNaN. */
4874 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4875 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4876 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4877 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4878 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4879 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4880 iemFpuRotateStackPush(pCtx);
4881 }
4882 else
4883 {
4884 /* Exception pending - don't change TOP or the register stack. */
4885 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4886 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4887 }
4888}
4889
4890
4891DECL_NO_INLINE(static, void)
4892iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
4893{
4894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4895 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4896
4897 if (pCtx->fpu.FCW & X86_FCW_IM)
4898 {
4899 /* Masked underflow - Push QNaN. */
4900 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4901 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4902 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4903 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4904 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4905 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4906 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4907 iemFpuRotateStackPush(pCtx);
4908 }
4909 else
4910 {
4911 /* Exception pending - don't change TOP or the register stack. */
4912 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4913 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4914 }
4915}
4916
4917
4918/**
4919 * Worker routine for raising an FPU stack overflow exception on a push.
4920 *
4921 * @param pIemCpu The IEM per CPU data.
4922 * @param pCtx The CPU context.
4923 */
4924static void iemFpuStackPushOverflowOnly(PIEMCPU pIemCpu, PCPUMCTX pCtx)
4925{
4926 if (pCtx->fpu.FCW & X86_FCW_IM)
4927 {
4928 /* Masked overflow. */
4929 uint16_t iNewTop = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + 7) & X86_FSW_TOP_SMASK;
4930 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4931 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4932 pCtx->fpu.FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4933 pCtx->fpu.FTW |= RT_BIT(iNewTop);
4934 iemFpuStoreQNan(&pCtx->fpu.aRegs[7].r80);
4935 iemFpuRotateStackPush(pCtx);
4936 }
4937 else
4938 {
4939 /* Exception pending - don't change TOP or the register stack. */
4940 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4941 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4942 }
4943}
4944
4945
4946/**
4947 * Raises a FPU stack overflow exception on a push.
4948 *
4949 * @param pIemCpu The IEM per CPU data.
4950 */
4951DECL_NO_INLINE(static, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
4952{
4953 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4954 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4955 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4956}
4957
4958
4959/**
4960 * Raises a FPU stack overflow exception on a push with a memory operand.
4961 *
4962 * @param pIemCpu The IEM per CPU data.
4963 * @param iEffSeg The effective memory operand selector register.
4964 * @param GCPtrEff The effective memory operand offset.
4965 */
4966DECL_NO_INLINE(static, void)
4967iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4968{
4969 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4970 iemFpuUpdateDP(pIemCpu, pCtx, iEffSeg, GCPtrEff);
4971 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4972 iemFpuStackPushOverflowOnly(pIemCpu, pCtx);
4973}
4974
4975
4976static int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
4977{
4978 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4979 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4980 if (pCtx->fpu.FTW & RT_BIT(iReg))
4981 return VINF_SUCCESS;
4982 return VERR_NOT_FOUND;
4983}
4984
4985
4986static int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
4987{
4988 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4989 uint16_t iReg = (X86_FSW_TOP_GET(pCtx->fpu.FSW) + iStReg) & X86_FSW_TOP_SMASK;
4990 if (pCtx->fpu.FTW & RT_BIT(iReg))
4991 {
4992 *ppRef = &pCtx->fpu.aRegs[iStReg].r80;
4993 return VINF_SUCCESS;
4994 }
4995 return VERR_NOT_FOUND;
4996}
4997
4998
4999static int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5000 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5001{
5002 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5003 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5004 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5005 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5006 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5007 {
5008 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
5009 *ppRef1 = &pCtx->fpu.aRegs[iStReg1].r80;
5010 return VINF_SUCCESS;
5011 }
5012 return VERR_NOT_FOUND;
5013}
5014
5015
5016static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5017{
5018 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5019 uint16_t iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
5020 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5021 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5022 if ((pCtx->fpu.FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5023 {
5024 *ppRef0 = &pCtx->fpu.aRegs[iStReg0].r80;
5025 return VINF_SUCCESS;
5026 }
5027 return VERR_NOT_FOUND;
5028}
5029
5030
5031/**
5032 * Updates the FPU exception status after FCW is changed.
5033 *
5034 * @param pCtx The CPU context.
5035 */
5036static void iemFpuRecalcExceptionStatus(PCPUMCTX pCtx)
5037{
5038 uint16_t u16Fsw = pCtx->fpu.FSW;
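    /* ES (error summary), mirrored by the obsolete B bit, must be set whenever an exception flag is set in FSW that isn't masked in FCW. */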
5039 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pCtx->fpu.FCW & X86_FCW_XCPT_MASK))
5040 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5041 else
5042 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5043 pCtx->fpu.FSW = u16Fsw;
5044}
5045
5046
5047/**
5048 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
5049 *
5050 * @returns The full FTW.
5051 * @param pCtx The CPU state.
5052 */
5053static uint16_t iemFpuCalcFullFtw(PCCPUMCTX pCtx)
5054{
5055 uint8_t const u8Ftw = (uint8_t)pCtx->fpu.FTW;
5056 uint16_t u16Ftw = 0;
5057 unsigned const iTop = X86_FSW_TOP_GET(pCtx->fpu.FSW);
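    /* x87 tag encoding: 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal, unsupported), 11 = empty. */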
5058 for (unsigned iSt = 0; iSt < 8; iSt++)
5059 {
5060 unsigned const iReg = (iSt + iTop) & 7;
5061 if (!(u8Ftw & RT_BIT(iReg)))
5062 u16Ftw |= 3 << (iReg * 2); /* empty */
5063 else
5064 {
5065 uint16_t uTag;
5066 PCRTFLOAT80U const pr80Reg = &pCtx->fpu.aRegs[iSt].r80;
5067 if (pr80Reg->s.uExponent == 0x7fff)
5068 uTag = 2; /* Exponent is all 1's => Special. */
5069 else if (pr80Reg->s.uExponent == 0x0000)
5070 {
5071 if (pr80Reg->s.u64Mantissa == 0x0000)
5072 uTag = 1; /* All bits are zero => Zero. */
5073 else
5074 uTag = 2; /* Must be special. */
5075 }
5076 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
5077 uTag = 0; /* Valid. */
5078 else
5079 uTag = 2; /* Must be special. */
5080
5081            u16Ftw |= uTag << (iReg * 2);
5082 }
5083 }
5084
5085 return u16Ftw;
5086}
5087
5088
5089/**
5090 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
5091 *
5092 * @returns The compressed FTW.
5093 * @param u16FullFtw The full FTW to convert.
5094 */
5095static uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
5096{
5097 uint8_t u8Ftw = 0;
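    /* The abridged tag word has one bit per register: 1 = occupied (tag 00, 01 or 10), 0 = empty (tag 11). */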
5098 for (unsigned i = 0; i < 8; i++)
5099 {
5100 if ((u16FullFtw & 3) != 3 /*empty*/)
5101 u8Ftw |= RT_BIT(i);
5102 u16FullFtw >>= 2;
5103 }
5104
5105 return u8Ftw;
5106}
5107
5108/** @} */
5109
5110
5111/** @name Memory access.
5112 *
5113 * @{
5114 */
5115
5116
5117/**
5118 * Updates the IEMCPU::cbWritten counter if applicable.
5119 *
5120 * @param pIemCpu The IEM per CPU data.
5121 * @param fAccess The access being accounted for.
5122 * @param cbMem The access size.
5123 */
5124DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
5125{
5126 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5127 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5128 pIemCpu->cbWritten += (uint32_t)cbMem;
5129}
5130
5131
5132/**
5133 * Checks if the given segment can be written to, raising the appropriate
5134 * exception if not.
5135 *
5136 * @returns VBox strict status code.
5137 *
5138 * @param pIemCpu The IEM per CPU data.
5139 * @param pHid Pointer to the hidden register.
5140 * @param iSegReg The register number.
5141 * @param pu64BaseAddr Where to return the base address to use for the
5142 * segment. (In 64-bit code it may differ from the
5143 * base in the hidden segment.)
5144 */
5145static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
5146{
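    /* In 64-bit mode only FS and GS contribute a segment base; ES, CS, SS and DS are treated as flat (base 0). */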
5147 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5148 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
5149 else
5150 {
5151 if (!pHid->Attr.n.u1Present)
5152 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
5153
5154 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
5155 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5156 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
5157 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
5158 *pu64BaseAddr = pHid->u64Base;
5159 }
5160 return VINF_SUCCESS;
5161}
5162
5163
5164/**
5165 * Checks if the given segment can be read from, raising the appropriate
5166 * exception if not.
5167 *
5168 * @returns VBox strict status code.
5169 *
5170 * @param pIemCpu The IEM per CPU data.
5171 * @param pHid Pointer to the hidden register.
5172 * @param iSegReg The register number.
5173 * @param pu64BaseAddr Where to return the base address to use for the
5174 * segment. (In 64-bit code it may differ from the
5175 * base in the hidden segment.)
5176 */
5177static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
5178{
5179 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5180 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
5181 else
5182 {
5183 if (!pHid->Attr.n.u1Present)
5184 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
5185
5186 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
5187 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
5188 *pu64BaseAddr = pHid->u64Base;
5189 }
5190 return VINF_SUCCESS;
5191}
5192
5193
5194/**
5195 * Applies the segment limit, base and attributes.
5196 *
5197 * This may raise a \#GP or \#SS.
5198 *
5199 * @returns VBox strict status code.
5200 *
5201 * @param pIemCpu The IEM per CPU data.
5202 * @param fAccess The kind of access which is being performed.
5203 * @param iSegReg The index of the segment register to apply.
5204 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5205 * TSS, ++).
5206 * @param pGCPtrMem Pointer to the guest memory address to apply
5207 * segmentation to. Input and output parameter.
5208 */
5209static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,
5210 size_t cbMem, PRTGCPTR pGCPtrMem)
5211{
5212 if (iSegReg == UINT8_MAX)
5213 return VINF_SUCCESS;
5214
5215 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
5216 switch (pIemCpu->enmCpuMode)
5217 {
5218 case IEMMODE_16BIT:
5219 case IEMMODE_32BIT:
5220 {
5221 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5222 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5223
5224 Assert(pSel->Attr.n.u1Present);
5225 Assert(pSel->Attr.n.u1DescType);
5226 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5227 {
5228 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5229 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5230 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
5231
5232 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5233 {
5234 /** @todo CPL check. */
5235 }
5236
5237 /*
5238 * There are two kinds of data selectors, normal and expand down.
5239 */
5240 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5241 {
5242 if ( GCPtrFirst32 > pSel->u32Limit
5243 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5244 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
5245 }
5246 else
5247 {
5248 /*
5249 * The upper boundary is defined by the B bit, not the G bit!
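                     * I.e. valid offsets are (limit, 0xFFFF] when B=0 and (limit, 0xFFFFFFFF] when B=1; the limit itself is excluded.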
5250 */
5251 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5252 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5253 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
5254 }
5255 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5256 }
5257 else
5258 {
5259
5260 /*
5261                 * Code selectors can usually be read through; writing is
5262                 * only permitted in real and V8086 mode.
5263 */
5264 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5265 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5266 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5267 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
5268 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
5269
5270 if ( GCPtrFirst32 > pSel->u32Limit
5271 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5272 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
5273
5274 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5275 {
5276 /** @todo CPL check. */
5277 }
5278
5279 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5280 }
5281 return VINF_SUCCESS;
5282 }
5283
5284 case IEMMODE_64BIT:
5285 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5286 *pGCPtrMem += pSel->u64Base;
5287 return VINF_SUCCESS;
5288
5289 default:
5290 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
5291 }
5292}
5293
5294
5295/**
5296 * Translates a virtual address to a physical address and checks if we
5297 * can access the page as specified.
5298 *
5299 * @param pIemCpu The IEM per CPU data.
5300 * @param GCPtrMem The virtual address.
5301 * @param fAccess The intended access.
5302 * @param pGCPhysMem Where to return the physical address.
5303 */
5304static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
5305 PRTGCPHYS pGCPhysMem)
5306{
5307 /** @todo Need a different PGM interface here. We're currently using
5308     *        generic / REM interfaces. This won't cut it for R0 & RC. */
5309 RTGCPHYS GCPhys;
5310 uint64_t fFlags;
5311 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
5312 if (RT_FAILURE(rc))
5313 {
5314 /** @todo Check unassigned memory in unpaged mode. */
5315 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5316 *pGCPhysMem = NIL_RTGCPHYS;
5317 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
5318 }
5319
5320    /* If the page is writable, user accessible and doesn't have the no-exec
5321       bit set, all access is allowed. Otherwise we'll have to check more carefully... */
5322 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5323 {
5324 /* Write to read only memory? */
5325 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5326 && !(fFlags & X86_PTE_RW)
5327 && ( pIemCpu->uCpl != 0
5328 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
5329 {
5330 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5331 *pGCPhysMem = NIL_RTGCPHYS;
5332 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5333 }
5334
5335 /* Kernel memory accessed by userland? */
5336 if ( !(fFlags & X86_PTE_US)
5337 && pIemCpu->uCpl == 3
5338 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5339 {
5340 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5341 *pGCPhysMem = NIL_RTGCPHYS;
5342 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5343 }
5344
5345 /* Executing non-executable memory? */
5346 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5347 && (fFlags & X86_PTE_PAE_NX)
5348 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
5349 {
5350 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5351 *pGCPhysMem = NIL_RTGCPHYS;
5352 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5353 VERR_ACCESS_DENIED);
5354 }
5355 }
5356
5357 /*
5358 * Set the dirty / access flags.
5359     *   ASSUMES this is set when the address is translated rather than on commit...
5360 */
5361 /** @todo testcase: check when A and D bits are actually set by the CPU. */
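    /* Writes need both the accessed and dirty bits set; reads and instruction fetches only the accessed bit. */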
5362 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5363 if ((fFlags & fAccessedDirty) != fAccessedDirty)
5364 {
5365 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5366 AssertRC(rc2);
5367 }
5368
5369 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
5370 *pGCPhysMem = GCPhys;
5371 return VINF_SUCCESS;
5372}
5373
5374
5375
5376/**
5377 * Maps a physical page.
5378 *
5379 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
5380 * @param pIemCpu The IEM per CPU data.
5381 * @param GCPhysMem The physical address.
5382 * @param fAccess The intended access.
5383 * @param ppvMem Where to return the mapping address.
5384 * @param pLock The PGM lock.
5385 */
5386static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
5387{
5388#ifdef IEM_VERIFICATION_MODE_FULL
5389 /* Force the alternative path so we can ignore writes. */
5390 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
5391 {
5392 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5393 {
5394 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
5395 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
5396 if (RT_FAILURE(rc2))
5397 pIemCpu->fProblematicMemory = true;
5398 }
5399 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5400 }
5401#endif
5402#ifdef IEM_LOG_MEMORY_WRITES
5403 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5404 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5405#endif
5406#ifdef IEM_VERIFICATION_MODE_MINIMAL
5407 return VERR_PGM_PHYS_TLB_CATCH_ALL;
5408#endif
5409
5410 /** @todo This API may require some improving later. A private deal with PGM
5411     *        regarding locking and unlocking needs to be struck. A couple of TLBs
5412 * living in PGM, but with publicly accessible inlined access methods
5413 * could perhaps be an even better solution. */
5414 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),
5415 GCPhysMem,
5416 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
5417 pIemCpu->fBypassHandlers,
5418 ppvMem,
5419 pLock);
5420 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
5421 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5422
5423#ifdef IEM_VERIFICATION_MODE_FULL
5424 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5425 pIemCpu->fProblematicMemory = true;
5426#endif
5427 return rc;
5428}
5429
5430
5431/**
5432 * Unmap a page previously mapped by iemMemPageMap.
5433 *
5434 * @param pIemCpu The IEM per CPU data.
5435 * @param GCPhysMem The physical address.
5436 * @param fAccess The intended access.
5437 * @param pvMem What iemMemPageMap returned.
5438 * @param pLock The PGM lock.
5439 */
5440DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
5441{
5442 NOREF(pIemCpu);
5443 NOREF(GCPhysMem);
5444 NOREF(fAccess);
5445 NOREF(pvMem);
5446 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
5447}
5448
5449
5450/**
5451 * Looks up a memory mapping entry.
5452 *
5453 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5454 * @param pIemCpu The IEM per CPU data.
5455 * @param pvMem The memory address.
5456 * @param   fAccess             The access type to match.
5457 */
5458DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5459{
5460 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5461 if ( pIemCpu->aMemMappings[0].pv == pvMem
5462 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5463 return 0;
5464 if ( pIemCpu->aMemMappings[1].pv == pvMem
5465 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5466 return 1;
5467 if ( pIemCpu->aMemMappings[2].pv == pvMem
5468 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5469 return 2;
5470 return VERR_NOT_FOUND;
5471}
5472
5473
5474/**
5475 * Finds a free memmap entry when using iNextMapping doesn't work.
5476 *
5477 * @returns Memory mapping index, 1024 on failure.
5478 * @param pIemCpu The IEM per CPU data.
5479 */
5480static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
5481{
5482 /*
5483 * The easy case.
5484 */
5485 if (pIemCpu->cActiveMappings == 0)
5486 {
5487 pIemCpu->iNextMapping = 1;
5488 return 0;
5489 }
5490
5491 /* There should be enough mappings for all instructions. */
5492 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
5493
5494 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
5495 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5496 return i;
5497
5498 AssertFailedReturn(1024);
5499}
5500
5501
5502/**
5503 * Commits a bounce buffer that needs writing back and unmaps it.
5504 *
5505 * @returns Strict VBox status code.
5506 * @param pIemCpu The IEM per CPU data.
5507 * @param iMemMap The index of the buffer to commit.
5508 */
5509static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
5510{
5511 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5512 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5513
5514 /*
5515 * Do the writing.
5516 */
5517 int rc;
5518#ifndef IEM_VERIFICATION_MODE_MINIMAL
5519 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
5520 && !IEM_VERIFICATION_ENABLED(pIemCpu))
5521 {
5522 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5523 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5524 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
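        /* The buffered write may span two physical pages: the first cbFirst bytes go to GCPhysFirst, the remaining cbSecond bytes to GCPhysSecond. */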
5525 if (!pIemCpu->fBypassHandlers)
5526 {
5527 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5528 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5529 pbBuf,
5530 cbFirst);
5531 if (cbSecond && rc == VINF_SUCCESS)
5532 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),
5533 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5534 pbBuf + cbFirst,
5535 cbSecond);
5536 }
5537 else
5538 {
5539 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5540 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5541 pbBuf,
5542 cbFirst);
5543 if (cbSecond && rc == VINF_SUCCESS)
5544 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),
5545 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5546 pbBuf + cbFirst,
5547 cbSecond);
5548 }
5549 if (rc != VINF_SUCCESS)
5550 {
5551 /** @todo status code handling */
5552 Log(("iemMemBounceBufferCommitAndUnmap: %s GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5553 pIemCpu->fBypassHandlers ? "PGMPhysWrite" : "PGMPhysSimpleWriteGCPhys",
5554 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5555 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5556 }
5557 }
5558 else
5559#endif
5560 rc = VINF_SUCCESS;
5561
5562#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5563 /*
5564 * Record the write(s).
5565 */
5566 if (!pIemCpu->fNoRem)
5567 {
5568 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5569 if (pEvtRec)
5570 {
5571 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5572 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
5573 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
5574 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
5575 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
5576 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5577 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5578 }
5579 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5580 {
5581 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5582 if (pEvtRec)
5583 {
5584 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
5585 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
5586 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5587 memcpy(pEvtRec->u.RamWrite.ab,
5588 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
5589 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
5590 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5591 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5592 }
5593 }
5594 }
5595#endif
5596#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
5597 if (rc == VINF_SUCCESS)
5598 {
5599 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
5600 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
5601 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
5602 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
5603 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
5604 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
5605
5606 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
5607 g_cbIemWrote = cbWrote;
5608 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5609 }
5610#endif
5611
5612 /*
5613 * Free the mapping entry.
5614 */
5615 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5616 Assert(pIemCpu->cActiveMappings != 0);
5617 pIemCpu->cActiveMappings--;
5618 return rc;
5619}
5620
5621
5622/**
5623 * iemMemMap worker that deals with a request crossing pages.
5624 */
5625static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,
5626 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5627{
5628 /*
5629 * Do the address translations.
5630 */
5631 RTGCPHYS GCPhysFirst;
5632 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5633 if (rcStrict != VINF_SUCCESS)
5634 return rcStrict;
5635
5636/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
5637 * last byte. */
5638 RTGCPHYS GCPhysSecond;
5639 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
5640 if (rcStrict != VINF_SUCCESS)
5641 return rcStrict;
5642 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
5643
5644#ifdef IEM_VERIFICATION_MODE_FULL
5645 /*
5646 * Detect problematic memory when verifying so we can select
5647 * the right execution engine. (TLB: Redo this.)
5648 */
5649 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5650 {
5651 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysFirst,
5652 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
5653 if (RT_SUCCESS(rc2))
5654 rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysSecond,
5655 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
5656 if (RT_FAILURE(rc2))
5657 pIemCpu->fProblematicMemory = true;
5658 }
5659#endif
5660
5661
5662 /*
5663 * Read in the current memory content if it's a read, execute or partial
5664 * write access.
5665 */
5666 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
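    /* Split the access at the page boundary: cbFirstPage bytes come from the first page, the remainder from the second. */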
5667 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
5668 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5669
5670 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5671 {
5672 int rc;
5673 if (!pIemCpu->fBypassHandlers)
5674 {
5675 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);
5676 if (rc != VINF_SUCCESS)
5677 {
5678 /** @todo status code handling */
5679 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5680 return rc;
5681 }
5682 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);
5683 if (rc != VINF_SUCCESS)
5684 {
5685 /** @todo status code handling */
5686 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5687 return rc;
5688 }
5689 }
5690 else
5691 {
5692 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);
5693 if (rc != VINF_SUCCESS)
5694 {
5695 /** @todo status code handling */
5696 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5697 return rc;
5698 }
5699 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5700 if (rc != VINF_SUCCESS)
5701 {
5702 /** @todo status code handling */
5703 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5704 return rc;
5705 }
5706 }
5707
5708#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5709 if ( !pIemCpu->fNoRem
5710 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5711 {
5712 /*
5713 * Record the reads.
5714 */
5715 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5716 if (pEvtRec)
5717 {
5718 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5719 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5720 pEvtRec->u.RamRead.cb = cbFirstPage;
5721 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5722 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5723 }
5724 pEvtRec = iemVerifyAllocRecord(pIemCpu);
5725 if (pEvtRec)
5726 {
5727 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5728 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
5729 pEvtRec->u.RamRead.cb = cbSecondPage;
5730 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5731 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5732 }
5733 }
5734#endif
5735 }
5736#ifdef VBOX_STRICT
5737 else
5738 memset(pbBuf, 0xcc, cbMem);
5739#endif
5740#ifdef VBOX_STRICT
5741 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5742 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5743#endif
5744
5745 /*
5746 * Commit the bounce buffer entry.
5747 */
5748 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5749 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5750 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5751 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5752 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
5753 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5754 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5755 pIemCpu->iNextMapping = iMemMap + 1;
5756 pIemCpu->cActiveMappings++;
5757
5758 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5759 *ppvMem = pbBuf;
5760 return VINF_SUCCESS;
5761}
5762
5763
5764/**
5765 * iemMemMap worker that deals with iemMemPageMap failures.
5766 */
5767static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5768 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5769{
5770 /*
5771 * Filter out conditions we can handle and the ones which shouldn't happen.
5772 */
5773 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5774 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5775 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5776 {
5777 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);
5778 return rcMap;
5779 }
5780 pIemCpu->cPotentialExits++;
5781
5782 /*
5783 * Read in the current memory content if it's a read, execute or partial
5784 * write access.
5785 */
5786 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
5787 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5788 {
5789 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5790 memset(pbBuf, 0xff, cbMem);
5791 else
5792 {
5793 int rc;
5794 if (!pIemCpu->fBypassHandlers)
5795 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);
5796 else
5797 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
5798 if (rc != VINF_SUCCESS)
5799 {
5800 /** @todo status code handling */
5801 Log(("iemMemBounceBufferMapPhys: %s GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5802 pIemCpu->fBypassHandlers ? "PGMPhysRead" : "PGMPhysSimpleReadGCPhys", GCPhysFirst, rc));
5803 return rc;
5804 }
5805 }
5806
5807#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
5808 if ( !pIemCpu->fNoRem
5809 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
5810 {
5811 /*
5812 * Record the read.
5813 */
5814 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
5815 if (pEvtRec)
5816 {
5817 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
5818 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
5819 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
5820 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
5821 *pIemCpu->ppIemEvtRecNext = pEvtRec;
5822 }
5823 }
5824#endif
5825 }
5826#ifdef VBOX_STRICT
5827 else
5828 memset(pbBuf, 0xcc, cbMem);
5829#endif
5830#ifdef VBOX_STRICT
5831 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
5832 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
5833#endif
5834
5835 /*
5836 * Commit the bounce buffer entry.
5837 */
5838 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5839 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5840 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5841 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
5842 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5843 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
5844 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5845 pIemCpu->iNextMapping = iMemMap + 1;
5846 pIemCpu->cActiveMappings++;
5847
5848 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5849 *ppvMem = pbBuf;
5850 return VINF_SUCCESS;
5851}
5852
5853
5854
5855/**
5856 * Maps the specified guest memory for the given kind of access.
5857 *
5858 * This may be using bounce buffering of the memory if it's crossing a page
5859 * boundary or if there is an access handler installed for any of it. Because
5860 * of lock prefix guarantees, we're in for some extra clutter when this
5861 * happens.
5862 *
5863 * This may raise a \#GP, \#SS, \#PF or \#AC.
5864 *
5865 * @returns VBox strict status code.
5866 *
5867 * @param pIemCpu The IEM per CPU data.
5868 * @param ppvMem Where to return the pointer to the mapped
5869 * memory.
5870 * @param cbMem The number of bytes to map. This is usually 1,
5871 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5872 * string operations it can be up to a page.
5873 * @param iSegReg The index of the segment register to use for
5874 * this access. The base and limits are checked.
5875 * Use UINT8_MAX to indicate that no segmentation
5876 * is required (for IDT, GDT and LDT accesses).
5877 * @param GCPtrMem The address of the guest memory.
5878 * @param   fAccess             How the memory is being accessed. The
5879 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5880 * how to map the memory, while the
5881 * IEM_ACCESS_WHAT_XXX bit is used when raising
5882 * exceptions.
5883 */
5884static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
5885{
5886 /*
5887 * Check the input and figure out which mapping entry to use.
5888 */
5889 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 94); /* 512 is the max! */
5890 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5891
5892 unsigned iMemMap = pIemCpu->iNextMapping;
5893 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
5894 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5895 {
5896 iMemMap = iemMemMapFindFree(pIemCpu);
5897 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);
5898 }
5899
5900 /*
5901 * Map the memory, checking that we can actually access it. If something
5902 * slightly complicated happens, fall back on bounce buffering.
5903 */
5904 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5905 if (rcStrict != VINF_SUCCESS)
5906 return rcStrict;
5907
5908 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
5909 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5910
5911 RTGCPHYS GCPhysFirst;
5912 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
5913 if (rcStrict != VINF_SUCCESS)
5914 return rcStrict;
5915
5916 void *pvMem;
5917 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5918 if (rcStrict != VINF_SUCCESS)
5919 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5920
5921 /*
5922 * Fill in the mapping table entry.
5923 */
5924 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
5925 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
5926 pIemCpu->iNextMapping = iMemMap + 1;
5927 pIemCpu->cActiveMappings++;
5928
5929 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
5930 *ppvMem = pvMem;
5931 return VINF_SUCCESS;
5932}
5933
5934
5935/**
5936 * Commits the guest memory if bounce buffered and unmaps it.
5937 *
5938 * @returns Strict VBox status code.
5939 * @param pIemCpu The IEM per CPU data.
5940 * @param pvMem The mapping.
5941 * @param fAccess The kind of access.
5942 */
5943static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
5944{
5945 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
5946 AssertReturn(iMemMap >= 0, iMemMap);
5947
5948 /* If it's bounce buffered, we may need to write back the buffer. */
5949 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
5950 {
5951 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
5952 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
5953 }
5954 /* Otherwise unlock it. */
5955 else
5956 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5957
5958 /* Free the entry. */
5959 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5960 Assert(pIemCpu->cActiveMappings != 0);
5961 pIemCpu->cActiveMappings--;
5962 return VINF_SUCCESS;
5963}
5964
5965
5966/**
5967 * Rolls back mappings, releasing page locks and such.
5968 *
5969 * The caller shall only call this after checking cActiveMappings.
5970 *
5972 * @param pIemCpu The IEM per CPU data.
5973 */
5974static void iemMemRollback(PIEMCPU pIemCpu)
5975{
5976 Assert(pIemCpu->cActiveMappings > 0);
5977
5978 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
5979 while (iMemMap-- > 0)
5980 {
5981 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
5982 if (fAccess != IEM_ACCESS_INVALID)
5983 {
5984 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5985 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
5986 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
5987 Assert(pIemCpu->cActiveMappings > 0);
5988 pIemCpu->cActiveMappings--;
5989 }
5990 }
5991}
5992
5993
5994/**
5995 * Fetches a data byte.
5996 *
5997 * @returns Strict VBox status code.
5998 * @param pIemCpu The IEM per CPU data.
5999 * @param pu8Dst Where to return the byte.
6000 * @param iSegReg The index of the segment register to use for
6001 * this access. The base and limits are checked.
6002 * @param GCPtrMem The address of the guest memory.
6003 */
6004static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6005{
6006 /* The lazy approach for now... */
6007 uint8_t const *pu8Src;
6008 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6009 if (rc == VINF_SUCCESS)
6010 {
6011 *pu8Dst = *pu8Src;
6012 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6013 }
6014 return rc;
6015}
6016
6017
6018/**
6019 * Fetches a data word.
6020 *
6021 * @returns Strict VBox status code.
6022 * @param pIemCpu The IEM per CPU data.
6023 * @param pu16Dst Where to return the word.
6024 * @param iSegReg The index of the segment register to use for
6025 * this access. The base and limits are checked.
6026 * @param GCPtrMem The address of the guest memory.
6027 */
6028static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6029{
6030 /* The lazy approach for now... */
6031 uint16_t const *pu16Src;
6032 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6033 if (rc == VINF_SUCCESS)
6034 {
6035 *pu16Dst = *pu16Src;
6036 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6037 }
6038 return rc;
6039}
6040
6041
6042/**
6043 * Fetches a data dword.
6044 *
6045 * @returns Strict VBox status code.
6046 * @param pIemCpu The IEM per CPU data.
6047 * @param pu32Dst Where to return the dword.
6048 * @param iSegReg The index of the segment register to use for
6049 * this access. The base and limits are checked.
6050 * @param GCPtrMem The address of the guest memory.
6051 */
6052static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6053{
6054 /* The lazy approach for now... */
6055 uint32_t const *pu32Src;
6056 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6057 if (rc == VINF_SUCCESS)
6058 {
6059 *pu32Dst = *pu32Src;
6060 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6061 }
6062 return rc;
6063}
6064
6065
6066#ifdef SOME_UNUSED_FUNCTION
6067/**
6068 * Fetches a data dword and sign extends it to a qword.
6069 *
6070 * @returns Strict VBox status code.
6071 * @param pIemCpu The IEM per CPU data.
6072 * @param pu64Dst Where to return the sign extended value.
6073 * @param iSegReg The index of the segment register to use for
6074 * this access. The base and limits are checked.
6075 * @param GCPtrMem The address of the guest memory.
6076 */
6077static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6078{
6079 /* The lazy approach for now... */
6080 int32_t const *pi32Src;
6081 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6082 if (rc == VINF_SUCCESS)
6083 {
6084 *pu64Dst = *pi32Src;
6085 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6086 }
6087#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6088 else
6089 *pu64Dst = 0;
6090#endif
6091 return rc;
6092}
6093#endif
6094
6095
6096/**
6097 * Fetches a data qword.
6098 *
6099 * @returns Strict VBox status code.
6100 * @param pIemCpu The IEM per CPU data.
6101 * @param pu64Dst Where to return the qword.
6102 * @param iSegReg The index of the segment register to use for
6103 * this access. The base and limits are checked.
6104 * @param GCPtrMem The address of the guest memory.
6105 */
6106static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6107{
6108 /* The lazy approach for now... */
6109 uint64_t const *pu64Src;
6110 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6111 if (rc == VINF_SUCCESS)
6112 {
6113 *pu64Dst = *pu64Src;
6114 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6115 }
6116 return rc;
6117}
6118
6119
6120/**
6121 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6122 *
6123 * @returns Strict VBox status code.
6124 * @param pIemCpu The IEM per CPU data.
6125 * @param pu64Dst Where to return the qword.
6126 * @param iSegReg The index of the segment register to use for
6127 * this access. The base and limits are checked.
6128 * @param GCPtrMem The address of the guest memory.
6129 */
6130static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6131{
6132 /* The lazy approach for now... */
6133 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6134 if (RT_UNLIKELY(GCPtrMem & 15))
6135 return iemRaiseGeneralProtectionFault0(pIemCpu);
6136
6137 uint64_t const *pu64Src;
6138 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6139 if (rc == VINF_SUCCESS)
6140 {
6141 *pu64Dst = *pu64Src;
6142 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6143 }
6144 return rc;
6145}
6146
6147
6148/**
6149 * Fetches a data tword.
6150 *
6151 * @returns Strict VBox status code.
6152 * @param pIemCpu The IEM per CPU data.
6153 * @param pr80Dst Where to return the tword.
6154 * @param iSegReg The index of the segment register to use for
6155 * this access. The base and limits are checked.
6156 * @param GCPtrMem The address of the guest memory.
6157 */
6158static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6159{
6160 /* The lazy approach for now... */
6161 PCRTFLOAT80U pr80Src;
6162 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6163 if (rc == VINF_SUCCESS)
6164 {
6165 *pr80Dst = *pr80Src;
6166 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6167 }
6168 return rc;
6169}
6170
6171
6172/**
6173 * Fetches a data dqword (double qword), generally SSE related.
6174 *
6175 * @returns Strict VBox status code.
6176 * @param pIemCpu The IEM per CPU data.
6177 * @param   pu128Dst            Where to return the dqword.
6178 * @param iSegReg The index of the segment register to use for
6179 * this access. The base and limits are checked.
6180 * @param GCPtrMem The address of the guest memory.
6181 */
6182static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6183{
6184 /* The lazy approach for now... */
6185 uint128_t const *pu128Src;
6186 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6187 if (rc == VINF_SUCCESS)
6188 {
6189 *pu128Dst = *pu128Src;
6190 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6191 }
6192 return rc;
6193}
6194
6195
6196/**
6197 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6198 * related.
6199 *
6200 * Raises GP(0) if not aligned.
6201 *
6202 * @returns Strict VBox status code.
6203 * @param pIemCpu The IEM per CPU data.
6204 * @param   pu128Dst            Where to return the dqword.
6205 * @param iSegReg The index of the segment register to use for
6206 * this access. The base and limits are checked.
6207 * @param GCPtrMem The address of the guest memory.
6208 */
6209static VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
6210{
6211 /* The lazy approach for now... */
6212 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6213 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6214 return iemRaiseGeneralProtectionFault0(pIemCpu);
6215
6216 uint128_t const *pu128Src;
6217 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6218 if (rc == VINF_SUCCESS)
6219 {
6220 *pu128Dst = *pu128Src;
6221 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6222 }
6223 return rc;
6224}
6225
6226
6227
6228
6229/**
6230 * Fetches a descriptor register (lgdt, lidt).
6231 *
6232 * @returns Strict VBox status code.
6233 * @param pIemCpu The IEM per CPU data.
6234 * @param pcbLimit Where to return the limit.
6235 * @param   pGCPtrBase          Where to return the base.
6236 * @param iSegReg The index of the segment register to use for
6237 * this access. The base and limits are checked.
6238 * @param GCPtrMem The address of the guest memory.
6239 * @param enmOpSize The effective operand size.
6240 */
6241static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,
6242 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
6243{
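    /* The memory operand is a 16-bit limit followed by a 24-, 32- or 64-bit linear base, depending on the effective operand size. */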
6244 uint8_t const *pu8Src;
6245 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
6246 (void **)&pu8Src,
6247 enmOpSize == IEMMODE_64BIT
6248 ? 2 + 8
6249 : enmOpSize == IEMMODE_32BIT
6250 ? 2 + 4
6251 : 2 + 3,
6252 iSegReg,
6253 GCPtrMem,
6254 IEM_ACCESS_DATA_R);
6255 if (rcStrict == VINF_SUCCESS)
6256 {
6257 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
6258 switch (enmOpSize)
6259 {
6260 case IEMMODE_16BIT:
6261 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
6262 break;
6263 case IEMMODE_32BIT:
6264 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
6265 break;
6266 case IEMMODE_64BIT:
6267 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
6268 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
6269 break;
6270
6271 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6272 }
6273 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6274 }
6275 return rcStrict;
6276}
6277
6278
6279
6280/**
6281 * Stores a data byte.
6282 *
6283 * @returns Strict VBox status code.
6284 * @param pIemCpu The IEM per CPU data.
6285 * @param iSegReg The index of the segment register to use for
6286 * this access. The base and limits are checked.
6287 * @param GCPtrMem The address of the guest memory.
6288 * @param u8Value The value to store.
6289 */
6290static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
6291{
6292 /* The lazy approach for now... */
6293 uint8_t *pu8Dst;
6294 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6295 if (rc == VINF_SUCCESS)
6296 {
6297 *pu8Dst = u8Value;
6298 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
6299 }
6300 return rc;
6301}
6302
6303
6304/**
6305 * Stores a data word.
6306 *
6307 * @returns Strict VBox status code.
6308 * @param pIemCpu The IEM per CPU data.
6309 * @param iSegReg The index of the segment register to use for
6310 * this access. The base and limits are checked.
6311 * @param GCPtrMem The address of the guest memory.
6312 * @param u16Value The value to store.
6313 */
6314static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
6315{
6316 /* The lazy approach for now... */
6317 uint16_t *pu16Dst;
6318 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6319 if (rc == VINF_SUCCESS)
6320 {
6321 *pu16Dst = u16Value;
6322 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
6323 }
6324 return rc;
6325}
6326
6327
6328/**
6329 * Stores a data dword.
6330 *
6331 * @returns Strict VBox status code.
6332 * @param pIemCpu The IEM per CPU data.
6333 * @param iSegReg The index of the segment register to use for
6334 * this access. The base and limits are checked.
6335 * @param GCPtrMem The address of the guest memory.
6336 * @param u32Value The value to store.
6337 */
6338static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
6339{
6340 /* The lazy approach for now... */
6341 uint32_t *pu32Dst;
6342 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6343 if (rc == VINF_SUCCESS)
6344 {
6345 *pu32Dst = u32Value;
6346 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
6347 }
6348 return rc;
6349}
6350
6351
6352/**
6353 * Stores a data qword.
6354 *
6355 * @returns Strict VBox status code.
6356 * @param pIemCpu The IEM per CPU data.
6357 * @param iSegReg The index of the segment register to use for
6358 * this access. The base and limits are checked.
6359 * @param GCPtrMem The address of the guest memory.
6360 * @param u64Value The value to store.
6361 */
6362static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
6363{
6364 /* The lazy approach for now... */
6365 uint64_t *pu64Dst;
6366 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6367 if (rc == VINF_SUCCESS)
6368 {
6369 *pu64Dst = u64Value;
6370 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
6371 }
6372 return rc;
6373}
6374
6375
6376/**
6377 * Stores a data dqword.
6378 *
6379 * @returns Strict VBox status code.
6380 * @param pIemCpu The IEM per CPU data.
6381 * @param iSegReg The index of the segment register to use for
6382 * this access. The base and limits are checked.
6383 * @param GCPtrMem The address of the guest memory.
6384 * @param   u128Value           The value to store.
6385 */
6386static VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
6387{
6388 /* The lazy approach for now... */
6389 uint128_t *pu128Dst;
6390 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6391 if (rc == VINF_SUCCESS)
6392 {
6393 *pu128Dst = u128Value;
6394 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
6395 }
6396 return rc;
6397}
6398
6399
6400/**
6401 * Stores a data dqword, SSE aligned.
6402 *
6403 * @returns Strict VBox status code.
6404 * @param pIemCpu The IEM per CPU data.
6405 * @param iSegReg The index of the segment register to use for
6406 * this access. The base and limits are checked.
6407 * @param GCPtrMem The address of the guest memory.
6408 * @param   u128Value           The value to store.
6409 */
6410static VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
6411{
6412 /* The lazy approach for now... */
6413 if ((GCPtrMem & 15) && !(pIemCpu->CTX_SUFF(pCtx)->fpu.MXCSR & X86_MSXCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6414 return iemRaiseGeneralProtectionFault0(pIemCpu);
6415
6416 uint128_t *pu128Dst;
6417 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
6418 if (rc == VINF_SUCCESS)
6419 {
6420 *pu128Dst = u128Value;
6421 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
6422 }
6423 return rc;
6424}
6425
6426
6427/**
6428 * Stores a descriptor register (sgdt, sidt).
6429 *
6430 * @returns Strict VBox status code.
6431 * @param pIemCpu The IEM per CPU data.
6432 * @param cbLimit The limit.
6433 * @param   GCPtrBase           The base address.
6434 * @param iSegReg The index of the segment register to use for
6435 * this access. The base and limits are checked.
6436 * @param GCPtrMem The address of the guest memory.
6437 * @param enmOpSize The effective operand size.
6438 */
6439static VBOXSTRICTRC iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase,
6440 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
6441{
6442 uint8_t *pu8Src;
6443 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
6444 (void **)&pu8Src,
6445 enmOpSize == IEMMODE_64BIT
6446 ? 2 + 8
6447 : enmOpSize == IEMMODE_32BIT
6448 ? 2 + 4
6449 : 2 + 3,
6450 iSegReg,
6451 GCPtrMem,
6452 IEM_ACCESS_DATA_W);
6453 if (rcStrict == VINF_SUCCESS)
6454 {
6455 pu8Src[0] = RT_BYTE1(cbLimit);
6456 pu8Src[1] = RT_BYTE2(cbLimit);
6457 pu8Src[2] = RT_BYTE1(GCPtrBase);
6458 pu8Src[3] = RT_BYTE2(GCPtrBase);
6459 pu8Src[4] = RT_BYTE3(GCPtrBase);
6460 if (enmOpSize == IEMMODE_16BIT)
6461 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
6462 else
6463 {
6464 pu8Src[5] = RT_BYTE4(GCPtrBase);
6465 if (enmOpSize == IEMMODE_64BIT)
6466 {
6467 pu8Src[6] = RT_BYTE5(GCPtrBase);
6468 pu8Src[7] = RT_BYTE6(GCPtrBase);
6469 pu8Src[8] = RT_BYTE7(GCPtrBase);
6470 pu8Src[9] = RT_BYTE8(GCPtrBase);
6471 }
6472 }
6473 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
6474 }
6475 return rcStrict;
6476}
6477
6478
6479/**
6480 * Pushes a word onto the stack.
6481 *
6482 * @returns Strict VBox status code.
6483 * @param pIemCpu The IEM per CPU data.
6484 * @param u16Value The value to push.
6485 */
6486static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
6487{
6488    /* Decrement the stack pointer. */
6489 uint64_t uNewRsp;
6490 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6491 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
6492
6493 /* Write the word the lazy way. */
6494 uint16_t *pu16Dst;
6495 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6496 if (rc == VINF_SUCCESS)
6497 {
6498 *pu16Dst = u16Value;
6499 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
6500 }
6501
6502    /* Commit the new RSP value unless an access handler made trouble. */
6503 if (rc == VINF_SUCCESS)
6504 pCtx->rsp = uNewRsp;
6505
6506 return rc;
6507}
6508
6509
6510/**
6511 * Pushes a dword onto the stack.
6512 *
6513 * @returns Strict VBox status code.
6514 * @param pIemCpu The IEM per CPU data.
6515 * @param u32Value The value to push.
6516 */
6517static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
6518{
6519    /* Decrement the stack pointer. */
6520 uint64_t uNewRsp;
6521 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6522 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
6523
6524 /* Write the dword the lazy way. */
6525 uint32_t *pu32Dst;
6526 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6527 if (rc == VINF_SUCCESS)
6528 {
6529 *pu32Dst = u32Value;
6530 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6531 }
6532
6533    /* Commit the new RSP value unless an access handler made trouble. */
6534 if (rc == VINF_SUCCESS)
6535 pCtx->rsp = uNewRsp;
6536
6537 return rc;
6538}
6539
6540
6541/**
6542 * Pushes a dword segment register value onto the stack.
6543 *
6544 * @returns Strict VBox status code.
6545 * @param pIemCpu The IEM per CPU data.
6546 * @param   u32Value            The value to push.
6547 */
6548static VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
6549{
6550    /* Decrement the stack pointer. */
6551 uint64_t uNewRsp;
6552 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6553 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
6554
6555 VBOXSTRICTRC rc;
6556 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
6557 {
6558 /* The recompiler writes a full dword. */
6559 uint32_t *pu32Dst;
6560 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6561 if (rc == VINF_SUCCESS)
6562 {
6563 *pu32Dst = u32Value;
6564 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6565 }
6566 }
6567 else
6568 {
6569        /* The Intel docs talk about zero extending the selector register
6570           value.  My actual Intel CPU here might be zero extending the value,
6571           but it still only writes the lower word... */
6572 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
6573         *        happens when crossing an electric page boundary, is the high word
6574 * checked for write accessibility or not? Probably it is. What about
6575 * segment limits? */
6576 uint16_t *pu16Dst;
6577 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
6578 if (rc == VINF_SUCCESS)
6579 {
6580 *pu16Dst = (uint16_t)u32Value;
6581 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
6582 }
6583 }
6584
6585    /* Commit the new RSP value unless an access handler made trouble. */
6586 if (rc == VINF_SUCCESS)
6587 pCtx->rsp = uNewRsp;
6588
6589 return rc;
6590}
6591
6592
6593/**
6594 * Pushes a qword onto the stack.
6595 *
6596 * @returns Strict VBox status code.
6597 * @param pIemCpu The IEM per CPU data.
6598 * @param u64Value The value to push.
6599 */
6600static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
6601{
6602    /* Decrement the stack pointer. */
6603 uint64_t uNewRsp;
6604 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6605 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
6606
6607    /* Write the qword the lazy way. */
6608 uint64_t *pu64Dst;
6609 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6610 if (rc == VINF_SUCCESS)
6611 {
6612 *pu64Dst = u64Value;
6613 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
6614 }
6615
6616    /* Commit the new RSP value unless an access handler made trouble. */
6617 if (rc == VINF_SUCCESS)
6618 pCtx->rsp = uNewRsp;
6619
6620 return rc;
6621}
6622
6623
6624/**
6625 * Pops a word from the stack.
6626 *
6627 * @returns Strict VBox status code.
6628 * @param pIemCpu The IEM per CPU data.
6629 * @param pu16Value Where to store the popped value.
6630 */
6631static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
6632{
6633 /* Increment the stack pointer. */
6634 uint64_t uNewRsp;
6635 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6636 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
6637
6638 /* Read the word the lazy way. */
6639 uint16_t const *pu16Src;
6640 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6641 if (rc == VINF_SUCCESS)
6642 {
6643 *pu16Value = *pu16Src;
6644 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6645
6646 /* Commit the new RSP value. */
6647 if (rc == VINF_SUCCESS)
6648 pCtx->rsp = uNewRsp;
6649 }
6650
6651 return rc;
6652}
6653
6654
6655/**
6656 * Pops a dword from the stack.
6657 *
6658 * @returns Strict VBox status code.
6659 * @param pIemCpu The IEM per CPU data.
6660 * @param pu32Value Where to store the popped value.
6661 */
6662static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
6663{
6664 /* Increment the stack pointer. */
6665 uint64_t uNewRsp;
6666 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6667 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
6668
6669 /* Read the dword the lazy way. */
6670 uint32_t const *pu32Src;
6671 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6672 if (rc == VINF_SUCCESS)
6673 {
6674 *pu32Value = *pu32Src;
6675 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6676
6677 /* Commit the new RSP value. */
6678 if (rc == VINF_SUCCESS)
6679 pCtx->rsp = uNewRsp;
6680 }
6681
6682 return rc;
6683}
6684
6685
6686/**
6687 * Pops a qword from the stack.
6688 *
6689 * @returns Strict VBox status code.
6690 * @param pIemCpu The IEM per CPU data.
6691 * @param pu64Value Where to store the popped value.
6692 */
6693static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
6694{
6695 /* Increment the stack pointer. */
6696 uint64_t uNewRsp;
6697 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6698 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
6699
6700 /* Read the qword the lazy way. */
6701 uint64_t const *pu64Src;
6702 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6703 if (rc == VINF_SUCCESS)
6704 {
6705 *pu64Value = *pu64Src;
6706 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6707
6708 /* Commit the new RSP value. */
6709 if (rc == VINF_SUCCESS)
6710 pCtx->rsp = uNewRsp;
6711 }
6712
6713 return rc;
6714}
6715
6716
6717/**
6718 * Pushes a word onto the stack, using a temporary stack pointer.
6719 *
6720 * @returns Strict VBox status code.
6721 * @param pIemCpu The IEM per CPU data.
6722 * @param u16Value The value to push.
6723 * @param pTmpRsp Pointer to the temporary stack pointer.
6724 */
6725static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
6726{
6727 /* Decrement the stack pointer. */
6728 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6729 RTUINT64U NewRsp = *pTmpRsp;
6730 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
6731
6732 /* Write the word the lazy way. */
6733 uint16_t *pu16Dst;
6734 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6735 if (rc == VINF_SUCCESS)
6736 {
6737 *pu16Dst = u16Value;
6738 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
6739 }
6740
6741 /* Commit the new RSP value unless an access handler made trouble. */
6742 if (rc == VINF_SUCCESS)
6743 *pTmpRsp = NewRsp;
6744
6745 return rc;
6746}
6747
6748
6749/**
6750 * Pushes a dword onto the stack, using a temporary stack pointer.
6751 *
6752 * @returns Strict VBox status code.
6753 * @param pIemCpu The IEM per CPU data.
6754 * @param u32Value The value to push.
6755 * @param pTmpRsp Pointer to the temporary stack pointer.
6756 */
6757static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
6758{
6759 /* Decrement the stack pointer. */
6760 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6761 RTUINT64U NewRsp = *pTmpRsp;
6762 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
6763
6764 /* Write the dword the lazy way. */
6765 uint32_t *pu32Dst;
6766 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6767 if (rc == VINF_SUCCESS)
6768 {
6769 *pu32Dst = u32Value;
6770 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
6771 }
6772
6773 /* Commit the new RSP value unless an access handler made trouble. */
6774 if (rc == VINF_SUCCESS)
6775 *pTmpRsp = NewRsp;
6776
6777 return rc;
6778}
6779
6780
6781/**
6782 * Pushes a qword onto the stack, using a temporary stack pointer.
6783 *
6784 * @returns Strict VBox status code.
6785 * @param pIemCpu The IEM per CPU data.
6786 * @param u64Value The value to push.
6787 * @param pTmpRsp Pointer to the temporary stack pointer.
6788 */
6789static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
6790{
6791 /* Decrement the stack pointer. */
6792 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6793 RTUINT64U NewRsp = *pTmpRsp;
6794 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
6795
6796 /* Write the qword the lazy way. */
6797 uint64_t *pu64Dst;
6798 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6799 if (rc == VINF_SUCCESS)
6800 {
6801 *pu64Dst = u64Value;
6802 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
6803 }
6804
6805 /* Commit the new RSP value unless an access handler made trouble. */
6806 if (rc == VINF_SUCCESS)
6807 *pTmpRsp = NewRsp;
6808
6809 return rc;
6810}
6811
6812
6813/**
6814 * Pops a word from the stack, using a temporary stack pointer.
6815 *
6816 * @returns Strict VBox status code.
6817 * @param pIemCpu The IEM per CPU data.
6818 * @param pu16Value Where to store the popped value.
6819 * @param pTmpRsp Pointer to the temporary stack pointer.
6820 */
6821static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
6822{
6823 /* Increment the stack pointer. */
6824 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6825 RTUINT64U NewRsp = *pTmpRsp;
6826 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
6827
6828 /* Read the word the lazy way. */
6829 uint16_t const *pu16Src;
6830 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6831 if (rc == VINF_SUCCESS)
6832 {
6833 *pu16Value = *pu16Src;
6834 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
6835
6836 /* Commit the new RSP value. */
6837 if (rc == VINF_SUCCESS)
6838 *pTmpRsp = NewRsp;
6839 }
6840
6841 return rc;
6842}
6843
6844
6845/**
6846 * Pops a dword from the stack, using a temporary stack pointer.
6847 *
6848 * @returns Strict VBox status code.
6849 * @param pIemCpu The IEM per CPU data.
6850 * @param pu32Value Where to store the popped value.
6851 * @param pTmpRsp Pointer to the temporary stack pointer.
6852 */
6853static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
6854{
6855 /* Increment the stack pointer. */
6856 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6857 RTUINT64U NewRsp = *pTmpRsp;
6858 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
6859
6860 /* Read the dword the lazy way. */
6861 uint32_t const *pu32Src;
6862 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6863 if (rc == VINF_SUCCESS)
6864 {
6865 *pu32Value = *pu32Src;
6866 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
6867
6868 /* Commit the new RSP value. */
6869 if (rc == VINF_SUCCESS)
6870 *pTmpRsp = NewRsp;
6871 }
6872
6873 return rc;
6874}
6875
6876
6877/**
6878 * Pops a qword from the stack, using a temporary stack pointer.
6879 *
6880 * @returns Strict VBox status code.
6881 * @param pIemCpu The IEM per CPU data.
6882 * @param pu64Value Where to store the popped value.
6883 * @param pTmpRsp Pointer to the temporary stack pointer.
6884 */
6885static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
6886{
6887 /* Increment the stack pointer. */
6888 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6889 RTUINT64U NewRsp = *pTmpRsp;
6890 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
6891
6892 /* Read the qword the lazy way. */
6893 uint64_t const *pu64Src;
6894 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6895 if (rcStrict == VINF_SUCCESS)
6896 {
6897 *pu64Value = *pu64Src;
6898 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
6899
6900 /* Commit the new RSP value. */
6901 if (rcStrict == VINF_SUCCESS)
6902 *pTmpRsp = NewRsp;
6903 }
6904
6905 return rcStrict;
6906}
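/*
 * Hypothetical usage sketch (example only, never compiled in): the *Ex
 * variants operate on a caller supplied RSP copy so that several pops can be
 * staged and the real RSP committed only once everything has succeeded. The
 * helper name iemExamplePopTwoWords is made up for illustration.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePopTwoWords(PIEMCPU pIemCpu, uint16_t *puIp, uint16_t *puCs)
{
    PCPUMCTX  pCtx = pIemCpu->CTX_SUFF(pCtx);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pIemCpu, puIp, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pIemCpu, puCs, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u; /* Commit RSP only after both pops succeeded. */
    return rcStrict;
}
#endif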
6907
6908
6909/**
6910 * Begin a special stack push (used by interrupts, exceptions and such).
6911 *
6912 * This will raise \#SS or \#PF if appropriate.
6913 *
6914 * @returns Strict VBox status code.
6915 * @param pIemCpu The IEM per CPU data.
6916 * @param cbMem The number of bytes to push onto the stack.
6917 * @param ppvMem Where to return the pointer to the stack memory.
6918 * As with the other memory functions this could be
6919 * direct access or bounce buffered access, so
6920 * don't commit any registers until the commit call
6921 * succeeds.
6922 * @param puNewRsp Where to return the new RSP value. This must be
6923 * passed unchanged to
6924 * iemMemStackPushCommitSpecial().
6925 */
6926static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
6927{
6928 Assert(cbMem < UINT8_MAX);
6929 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6930 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
6931 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
6932}
6933
6934
6935/**
6936 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
6937 *
6938 * This will update the rSP.
6939 *
6940 * @returns Strict VBox status code.
6941 * @param pIemCpu The IEM per CPU data.
6942 * @param pvMem The pointer returned by
6943 * iemMemStackPushBeginSpecial().
6944 * @param uNewRsp The new RSP value returned by
6945 * iemMemStackPushBeginSpecial().
6946 */
6947static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
6948{
6949 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
6950 if (rcStrict == VINF_SUCCESS)
6951 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
6952 return rcStrict;
6953}
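/*
 * Hypothetical usage sketch (example only, never compiled in): the special
 * push is a two step protocol - map the stack memory, fill it in, then commit
 * both the memory and RSP in one go. Roughly how an exception-frame style
 * push of three dwords could look; the helper name and layout are simplified
 * for illustration.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePushThreeDwords(PIEMCPU pIemCpu, uint32_t u32A, uint32_t u32B, uint32_t u32C)
{
    uint32_t    *pau32Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
                                                        (void **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau32Frame[0] = u32A; /* lowest address, i.e. the new top of the stack */
    pau32Frame[1] = u32B;
    pau32Frame[2] = u32C;
    return iemMemStackPushCommitSpecial(pIemCpu, pau32Frame, uNewRsp);
}
#endif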
6954
6955
6956/**
6957 * Begin a special stack pop (used by iret, retf and such).
6958 *
6959 * This will raise \#SS or \#PF if appropriate.
6960 *
6961 * @returns Strict VBox status code.
6962 * @param pIemCpu The IEM per CPU data.
6963 * @param cbMem The number of bytes to pop from the stack.
6964 * @param ppvMem Where to return the pointer to the stack memory.
6965 * @param puNewRsp Where to return the new RSP value. This must be
6966 * passed unchanged to
6967 * iemMemStackPopCommitSpecial() or applied
6968 * manually if iemMemStackPopDoneSpecial() is used.
6969 */
6970static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6971{
6972 Assert(cbMem < UINT8_MAX);
6973 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6974 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
6975 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
6976}
6977
6978
6979/**
6980 * Continue a special stack pop (used by iret and retf).
6981 *
6982 * This will raise \#SS or \#PF if appropriate.
6983 *
6984 * @returns Strict VBox status code.
6985 * @param pIemCpu The IEM per CPU data.
6986 * @param cbMem The number of bytes to pop from the stack.
6987 * @param ppvMem Where to return the pointer to the stack memory.
6988 * @param puNewRsp Where to return the new RSP value. This must be
6989 * passed unchanged to
6990 * iemMemStackPopCommitSpecial() or applied
6991 * manually if iemMemStackPopDoneSpecial() is used.
6992 */
6993static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
6994{
6995 Assert(cbMem < UINT8_MAX);
6996 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6997 RTUINT64U NewRsp;
6998 NewRsp.u = *puNewRsp;
6999 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7000 *puNewRsp = NewRsp.u;
7001 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7002}
7003
7004
7005/**
7006 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
7007 *
7008 * This will update the rSP.
7009 *
7010 * @returns Strict VBox status code.
7011 * @param pIemCpu The IEM per CPU data.
7012 * @param pvMem The pointer returned by
7013 * iemMemStackPopBeginSpecial().
7014 * @param uNewRsp The new RSP value returned by
7015 * iemMemStackPopBeginSpecial().
7016 */
7017static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
7018{
7019 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7020 if (rcStrict == VINF_SUCCESS)
7021 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
7022 return rcStrict;
7023}
7024
7025
7026/**
7027 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7028 * iemMemStackPopContinueSpecial).
7029 *
7030 * The caller will manually commit the rSP.
7031 *
7032 * @returns Strict VBox status code.
7033 * @param pIemCpu The IEM per CPU data.
7034 * @param pvMem The pointer returned by
7035 * iemMemStackPopBeginSpecial() or
7036 * iemMemStackPopContinueSpecial().
7037 */
7038static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
7039{
7040 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7041}
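/*
 * Hypothetical usage sketch (example only, never compiled in): a pop sequence
 * using the Begin/Done variants, where the caller commits RSP manually only
 * after the popped data has been accepted. The helper name iemExamplePopU32
 * is made up for illustration.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
{
    uint32_t const *pu32Src;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, sizeof(uint32_t),
                                                       (void const **)&pu32Src, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu32Value = *pu32Src;
    rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu32Src);
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp; /* Manual RSP commit. */
    return rcStrict;
}
#endif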
7042
7043
7044/**
7045 * Fetches a system table byte.
7046 *
7047 * @returns Strict VBox status code.
7048 * @param pIemCpu The IEM per CPU data.
7049 * @param pbDst Where to return the byte.
7050 * @param iSegReg The index of the segment register to use for
7051 * this access. The base and limits are checked.
7052 * @param GCPtrMem The address of the guest memory.
7053 */
7054static VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7055{
7056 /* The lazy approach for now... */
7057 uint8_t const *pbSrc;
7058 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
7059 if (rc == VINF_SUCCESS)
7060 {
7061 *pbDst = *pbSrc;
7062 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
7063 }
7064 return rc;
7065}
7066
7067
7068/**
7069 * Fetches a system table word.
7070 *
7071 * @returns Strict VBox status code.
7072 * @param pIemCpu The IEM per CPU data.
7073 * @param pu16Dst Where to return the word.
7074 * @param iSegReg The index of the segment register to use for
7075 * this access. The base and limits are checked.
7076 * @param GCPtrMem The address of the guest memory.
7077 */
7078static VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7079{
7080 /* The lazy approach for now... */
7081 uint16_t const *pu16Src;
7082 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
7083 if (rc == VINF_SUCCESS)
7084 {
7085 *pu16Dst = *pu16Src;
7086 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
7087 }
7088 return rc;
7089}
7090
7091
7092/**
7093 * Fetches a system table dword.
7094 *
7095 * @returns Strict VBox status code.
7096 * @param pIemCpu The IEM per CPU data.
7097 * @param pu32Dst Where to return the dword.
7098 * @param iSegReg The index of the segment register to use for
7099 * this access. The base and limits are checked.
7100 * @param GCPtrMem The address of the guest memory.
7101 */
7102static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7103{
7104 /* The lazy approach for now... */
7105 uint32_t const *pu32Src;
7106 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
7107 if (rc == VINF_SUCCESS)
7108 {
7109 *pu32Dst = *pu32Src;
7110 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
7111 }
7112 return rc;
7113}
7114
7115
7116/**
7117 * Fetches a system table qword.
7118 *
7119 * @returns Strict VBox status code.
7120 * @param pIemCpu The IEM per CPU data.
7121 * @param pu64Dst Where to return the qword.
7122 * @param iSegReg The index of the segment register to use for
7123 * this access. The base and limits are checked.
7124 * @param GCPtrMem The address of the guest memory.
7125 */
7126static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7127{
7128 /* The lazy approach for now... */
7129 uint64_t const *pu64Src;
7130 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
7131 if (rc == VINF_SUCCESS)
7132 {
7133 *pu64Dst = *pu64Src;
7134 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
7135 }
7136 return rc;
7137}
7138
7139
7140/**
7141 * Fetches a descriptor table entry.
7142 *
7143 * @returns Strict VBox status code.
7144 * @param pIemCpu The IEM per CPU.
7145 * @param pDesc Where to return the descriptor table entry.
7146 * @param uSel The selector which table entry to fetch.
7147 * @param uXcpt The exception to raise on table lookup error.
7148 */
7149static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
7150{
7151 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7152
7153 /** @todo did the 286 require all 8 bytes to be accessible? */
7154 /*
7155 * Get the selector table base and check bounds.
7156 */
7157 RTGCPTR GCPtrBase;
7158 if (uSel & X86_SEL_LDT)
7159 {
7160 if ( !pCtx->ldtr.Attr.n.u1Present
7161 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
7162 {
7163 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7164 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
7165 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7166 uSel & ~X86_SEL_RPL, 0);
7167 }
7168
7169 Assert(pCtx->ldtr.Attr.n.u1Present);
7170 GCPtrBase = pCtx->ldtr.u64Base;
7171 }
7172 else
7173 {
7174 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
7175 {
7176 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
7177 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7178 uSel & ~X86_SEL_RPL, 0);
7179 }
7180 GCPtrBase = pCtx->gdtr.pGdt;
7181 }
7182
7183 /*
7184 * Read the legacy descriptor and maybe the long mode extensions if
7185 * required.
7186 */
7187 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7188 if (rcStrict == VINF_SUCCESS)
7189 {
7190 if ( !IEM_IS_LONG_MODE(pIemCpu)
7191 || pDesc->Legacy.Gen.u1DescType)
7192 pDesc->Long.au64[1] = 0;
7193 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
7194 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
7195 else
7196 {
7197 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
7198 /** @todo is this the right exception? */
7199 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7200 uSel & ~X86_SEL_RPL, 0);
7201 }
7202 }
7203 return rcStrict;
7204}
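/*
 * Worked example (hypothetical selector value, for illustration): uSel = 0x002f
 * decomposes into index 5 (0x002f & X86_SEL_MASK = 0x28), TI = 1 (X86_SEL_LDT
 * set, so the LDT is used) and RPL = 3. The descriptor is thus read from
 * ldtr.u64Base + 0x28, and the bounds check above compares
 * (0x2f | X86_SEL_RPL_LDT) = 0x2f against ldtr.u32Limit.
 */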
7205
7206
7207/**
7208 * Fakes a long mode stack segment descriptor for SS = 0.
7209 *
7210 * @param pDescSs Where to return the fake stack descriptor.
7211 * @param uDpl The DPL we want.
7212 */
7213static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
7214{
7215 pDescSs->Long.au64[0] = 0;
7216 pDescSs->Long.au64[1] = 0;
7217 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
7218 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
7219 pDescSs->Long.Gen.u2Dpl = uDpl;
7220 pDescSs->Long.Gen.u1Present = 1;
7221 pDescSs->Long.Gen.u1Long = 1;
7222}
7223
7224
7225/**
7226 * Marks the selector descriptor as accessed (only non-system descriptors).
7227 *
7228 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
7229 * will therefore skip the limit checks.
7230 *
7231 * @returns Strict VBox status code.
7232 * @param pIemCpu The IEM per CPU.
7233 * @param uSel The selector.
7234 */
7235static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
7236{
7237 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7238
7239 /*
7240 * Get the selector table base and calculate the entry address.
7241 */
7242 RTGCPTR GCPtr = uSel & X86_SEL_LDT
7243 ? pCtx->ldtr.u64Base
7244 : pCtx->gdtr.pGdt;
7245 GCPtr += uSel & X86_SEL_MASK;
7246
7247 /*
7248 * ASMAtomicBitSet will assert if the address is misaligned, so do some
7249 * ugly stuff to avoid this. This will make sure it's an atomic access
7250 * as well as more or less remove any question about 8-bit or 32-bit accesses.
7251 */
7252 VBOXSTRICTRC rcStrict;
7253 uint32_t volatile *pu32;
7254 if ((GCPtr & 3) == 0)
7255 {
7256 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
7257 GCPtr += 2 + 2;
7258 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
7259 if (rcStrict != VINF_SUCCESS)
7260 return rcStrict;
7261 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
7262 }
7263 else
7264 {
7265 /* The misaligned GDT/LDT case, map the whole thing. */
7266 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
7267 if (rcStrict != VINF_SUCCESS)
7268 return rcStrict;
7269 switch ((uintptr_t)pu32 & 3)
7270 {
7271 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
7272 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
7273 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
7274 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
7275 }
7276 }
7277
7278 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
7279}
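/*
 * Hypothetical usage sketch (example only, never compiled in): how the two
 * helpers above are typically combined when loading a segment register -
 * fetch the descriptor, validate it (checks omitted here), then set the
 * accessed bit if it is not already set. The helper name is made up.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleFetchAndMarkAccessed(PIEMCPU pIemCpu, uint16_t uSel, PIEMSELDESC pDesc)
{
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... type, DPL and present checks would go here ... */
    if (!(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
    return rcStrict;
}
#endif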
7280
7281/** @} */
7282
7283
7284/*
7285 * Include the C/C++ implementation of instruction.
7286 */
7287#include "IEMAllCImpl.cpp.h"
7288
7289
7290
7291/** @name "Microcode" macros.
7292 *
7293 * The idea is that we should be able to use the same code to interpret
7294 * instructions as well as to recompile them. Thus this obfuscation.
7295 *
7296 * @{
7297 */
7298#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
7299#define IEM_MC_END() }
7300#define IEM_MC_PAUSE() do {} while (0)
7301#define IEM_MC_CONTINUE() do {} while (0)
7302
7303/** Internal macro. */
7304#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
7305 do \
7306 { \
7307 VBOXSTRICTRC rcStrict2 = a_Expr; \
7308 if (rcStrict2 != VINF_SUCCESS) \
7309 return rcStrict2; \
7310 } while (0)
7311
7312#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
7313#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
7314#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
7315#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
7316#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
7317#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
7318#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
7319
7320#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
7321#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
7322 do { \
7323 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
7324 return iemRaiseDeviceNotAvailable(pIemCpu); \
7325 } while (0)
7326#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
7327 do { \
7328 if ((pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW & X86_FSW_ES) \
7329 return iemRaiseMathFault(pIemCpu); \
7330 } while (0)
7331#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
7332 do { \
7333 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
7334 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFSXR) \
7335 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2) ) \
7336 return iemRaiseUndefinedOpcode(pIemCpu); \
7337 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
7338 return iemRaiseDeviceNotAvailable(pIemCpu); \
7339 } while (0)
7340#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
7341 do { \
7342 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
7343 || !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MMX) ) \
7344 return iemRaiseUndefinedOpcode(pIemCpu); \
7345 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
7346 return iemRaiseDeviceNotAvailable(pIemCpu); \
7347 } while (0)
7348#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
7349 do { \
7350 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
7351 || ( !IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE) \
7352 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_AMD_FEATURE_EDX_AXMMX) ) ) \
7353 return iemRaiseUndefinedOpcode(pIemCpu); \
7354 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
7355 return iemRaiseDeviceNotAvailable(pIemCpu); \
7356 } while (0)
7357#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
7358 do { \
7359 if (pIemCpu->uCpl != 0) \
7360 return iemRaiseGeneralProtectionFault0(pIemCpu); \
7361 } while (0)
7362
7363
7364#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
7365#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
7366#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
7367#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
7368#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
7369#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
7370#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
7371 uint32_t a_Name; \
7372 uint32_t *a_pName = &a_Name
7373#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
7374 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
7375
7376#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
7377#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
7378
7379#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7380#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7381#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7382#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
7383#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
7384#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
7385#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
7386#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
7387#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
7388#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
7389#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
7390#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
7391#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
7392#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
7393#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
7394#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
7395#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
7396#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
7397#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
7398#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
7399#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
7400#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
7401#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
7402#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
7403#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
7404#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
7405#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
7406#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
7407#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
7408/** @note Not for IOPL or IF testing or modification. */
7409#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
7410#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
7411#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FSW
7412#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->fpu.FCW
7413
7414#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
7415#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
7416#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
7417#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
7418#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
7419#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
7420#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
7421#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
7422#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
7423#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
7424#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
7425 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
7426
7427#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
7428#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
7429/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
7430 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
7431#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
7432#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
7433/** @note Not for IOPL or IF testing or modification. */
7434#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
7435
7436#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
7437#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
7438#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
7439 do { \
7440 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7441 *pu32Reg += (a_u32Value); \
7442 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
7443 } while (0)
7444#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
7445
7446#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
7447#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
7448#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
7449 do { \
7450 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7451 *pu32Reg -= (a_u32Value); \
7452 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
7453 } while (0)
7454#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
7455
7456#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
7457#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
7458#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
7459#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
7460#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
7461#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
7462#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
7463
7464#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
7465#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
7466#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
7467#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
7468
7469#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
7470#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
7471#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
7472
7473#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
7474#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
7475
7476#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
7477#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
7478#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
7479
7480#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
7481#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
7482#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
7483
7484#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
7485
7486#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
7487
7488#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
7489#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
7490#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
7491 do { \
7492 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7493 *pu32Reg &= (a_u32Value); \
7494 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
7495 } while (0)
7496#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
7497
7498#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
7499#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
7500#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
7501 do { \
7502 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
7503 *pu32Reg |= (a_u32Value); \
7504 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
7505 } while (0)
7506#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
7507
7508
7509/** @note Not for IOPL or IF modification. */
7510#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
7511/** @note Not for IOPL or IF modification. */
7512#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
7513/** @note Not for IOPL or IF modification. */
7514#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
7515
7516#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
7517
7518
7519#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
7520 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx; } while (0)
7521#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
7522 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].au32[0]; } while (0)
7523#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
7524 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
7525#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
7526 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
7527#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
7528 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7529#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
7530 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7531#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
7532 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
7533
7534#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
7535 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm; } while (0)
7536#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
7537 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0]; } while (0)
7538#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
7539 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au32[0]; } while (0)
7540#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
7541 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
7542#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
7543 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
7544 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
7545 } while (0)
7546#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
7547 do { pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
7548 pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
7549 } while (0)
7550#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
7551 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
7552#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
7553 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
7554#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
7555 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0])
7556
7557#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
7558 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
7559#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
7560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
7561#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
7562 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
7563
7564#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7565 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
7566#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7567 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7568#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
7569 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
7570
7571#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7572 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
7573#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7574 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7575#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
7576 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
7577
7578#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7579 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
7580
7581#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7582 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
7583#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
7584 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
7585#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
7586 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7587
7588#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
7589 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
7590#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
7591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
7592#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
7593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
7594
7595#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
7596 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7597#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
7598 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
7599
7600
7601
7602#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7603 do { \
7604 uint8_t u8Tmp; \
7605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7606 (a_u16Dst) = u8Tmp; \
7607 } while (0)
7608#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7609 do { \
7610 uint8_t u8Tmp; \
7611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7612 (a_u32Dst) = u8Tmp; \
7613 } while (0)
7614#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7615 do { \
7616 uint8_t u8Tmp; \
7617 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7618 (a_u64Dst) = u8Tmp; \
7619 } while (0)
7620#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7621 do { \
7622 uint16_t u16Tmp; \
7623 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7624 (a_u32Dst) = u16Tmp; \
7625 } while (0)
7626#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7627 do { \
7628 uint16_t u16Tmp; \
7629 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7630 (a_u64Dst) = u16Tmp; \
7631 } while (0)
7632#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7633 do { \
7634 uint32_t u32Tmp; \
7635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
7636 (a_u64Dst) = u32Tmp; \
7637 } while (0)
7638
7639#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
7640 do { \
7641 uint8_t u8Tmp; \
7642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7643 (a_u16Dst) = (int8_t)u8Tmp; \
7644 } while (0)
7645#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7646 do { \
7647 uint8_t u8Tmp; \
7648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7649 (a_u32Dst) = (int8_t)u8Tmp; \
7650 } while (0)
7651#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7652 do { \
7653 uint8_t u8Tmp; \
7654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
7655 (a_u64Dst) = (int8_t)u8Tmp; \
7656 } while (0)
7657#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
7658 do { \
7659 uint16_t u16Tmp; \
7660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7661 (a_u32Dst) = (int16_t)u16Tmp; \
7662 } while (0)
7663#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7664 do { \
7665 uint16_t u16Tmp; \
7666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
7667 (a_u64Dst) = (int16_t)u16Tmp; \
7668 } while (0)
7669#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
7670 do { \
7671 uint32_t u32Tmp; \
7672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
7673 (a_u64Dst) = (int32_t)u32Tmp; \
7674 } while (0)
7675
7676#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
7677 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
7678#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
7679 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
7680#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
7681 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
7682#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
7683 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
7684
7685#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
7686 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
7687#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
7688 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
7689#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
7690 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
7691#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
7692 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
7693
7694#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
7695#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
7696#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
7697#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
7698#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
7699#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
7700#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
7701 do { \
7702 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
7703 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
7704 } while (0)
7705
7706#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
7707 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
7708#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
7709 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
7710
7711
7712#define IEM_MC_PUSH_U16(a_u16Value) \
7713 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
7714#define IEM_MC_PUSH_U32(a_u32Value) \
7715 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
7716#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
7717 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
7718#define IEM_MC_PUSH_U64(a_u64Value) \
7719 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
7720
7721#define IEM_MC_POP_U16(a_pu16Value) \
7722 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
7723#define IEM_MC_POP_U32(a_pu32Value) \
7724 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
7725#define IEM_MC_POP_U64(a_pu64Value) \
7726 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
7727
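/*
 * Illustrative sketch (example only, never compiled in): the push/pop
 * microcode statements above are used inside IEM_MC_BEGIN/IEM_MC_END blocks
 * in the instruction decoder. A 32-bit "push general register" body would
 * look roughly like this; iReg stands for the decoded register index and the
 * real decoder additionally switches on the effective operand size.
 */
#if 0 /* example only */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint32_t, u32Value);
    IEM_MC_FETCH_GREG_U32(u32Value, iReg);
    IEM_MC_PUSH_U32(u32Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif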
7728/** Maps guest memory for direct or bounce buffered access.
7729 * The purpose is to pass it to an operand implementation, thus the a_iArg.
7730 * @remarks May return.
7731 */
7732#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
7733 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
7734
7735/** Maps guest memory for direct or bounce buffered access.
7736 * The purpose is to pass it to an operand implementation, thus the a_iArg.
7737 * @remarks May return.
7738 */
7739#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
7740 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
7741
7742/** Commits the memory and unmaps the guest memory.
7743 * @remarks May return.
7744 */
7745#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
7746 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
7747
7748/** Commits the memory and unmaps the guest memory unless the FPU status word
7749 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
7750 * that would prevent the FPU store from taking place.
7751 *
7752 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
7753 * store, while \#P will not.
7754 *
7755 * @remarks May in theory return - for now.
7756 */
7757#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
7758 do { \
7759 if ( !(a_u16FSW & X86_FSW_ES) \
7760 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
7761 & ~(pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_MASK_ALL) ) ) \
7762 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
7763 } while (0)
7764
7765/** Calculate efficient address from R/M. */
7766#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
7767 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
7768
7769#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
7770#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
7771#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
7772#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
7773#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
7774#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
7775#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
7776
7777/**
7778 * Defers the rest of the instruction emulation to a C implementation routine
7779 * and returns, only taking the standard parameters.
7780 *
7781 * @param a_pfnCImpl The pointer to the C routine.
7782 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7783 */
7784#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7785
7786/**
7787 * Defers the rest of instruction emulation to a C implementation routine and
7788 * returns, taking one argument in addition to the standard ones.
7789 *
7790 * @param a_pfnCImpl The pointer to the C routine.
7791 * @param a0 The argument.
7792 */
7793#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7794
7795/**
7796 * Defers the rest of the instruction emulation to a C implementation routine
7797 * and returns, taking two arguments in addition to the standard ones.
7798 *
7799 * @param a_pfnCImpl The pointer to the C routine.
7800 * @param a0 The first extra argument.
7801 * @param a1 The second extra argument.
7802 */
7803#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7804
7805/**
7806 * Defers the rest of the instruction emulation to a C implementation routine
7807 * and returns, taking three arguments in addition to the standard ones.
7808 *
7809 * @param a_pfnCImpl The pointer to the C routine.
7810 * @param a0 The first extra argument.
7811 * @param a1 The second extra argument.
7812 * @param a2 The third extra argument.
7813 */
7814#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
7815
7816/**
7817 * Defers the rest of the instruction emulation to a C implementation routine
7818 * and returns, taking four arguments in addition to the standard ones.
7819 *
7820 * @param a_pfnCImpl The pointer to the C routine.
7821 * @param a0 The first extra argument.
7822 * @param a1 The second extra argument.
7823 * @param a2 The third extra argument.
7824 * @param a3 The fourth extra argument.
7825 */
7826#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
7827
7828/**
7829 * Defers the rest of the instruction emulation to a C implementation routine
7830 * and returns, taking five arguments in addition to the standard ones.
7831 *
7832 * @param a_pfnCImpl The pointer to the C routine.
7833 * @param a0 The first extra argument.
7834 * @param a1 The second extra argument.
7835 * @param a2 The third extra argument.
7836 * @param a3 The fourth extra argument.
7837 * @param a4 The fifth extra argument.
7838 */
7839#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
7840
7841/**
7842 * Defers the entire instruction emulation to a C implementation routine and
7843 * returns, only taking the standard parameters.
7844 *
7845 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7846 *
7847 * @param a_pfnCImpl The pointer to the C routine.
7848 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
7849 */
7850#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
7851
7852/**
7853 * Defers the entire instruction emulation to a C implementation routine and
7854 * returns, taking one argument in addition to the standard ones.
7855 *
7856 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7857 *
7858 * @param a_pfnCImpl The pointer to the C routine.
7859 * @param a0 The argument.
7860 */
7861#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
7862
7863/**
7864 * Defers the entire instruction emulation to a C implementation routine and
7865 * returns, taking two arguments in addition to the standard ones.
7866 *
7867 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7868 *
7869 * @param a_pfnCImpl The pointer to the C routine.
7870 * @param a0 The first extra argument.
7871 * @param a1 The second extra argument.
7872 */
7873#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
7874
7875/**
7876 * Defers the entire instruction emulation to a C implementation routine and
7877 * returns, taking three arguments in addition to the standard ones.
7878 *
7879 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
7880 *
7881 * @param a_pfnCImpl The pointer to the C routine.
7882 * @param a0 The first extra argument.
7883 * @param a1 The second extra argument.
7884 * @param a2 The third extra argument.
7885 */
7886#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
7887
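/*
 * Usage sketch (illustrative only): the IEM_MC_DEFER_TO_CIMPL_N macros carry no
 * 'return' themselves, so a stand-alone opcode handler returns their result
 * explicitly, roughly like this:
 *
 *     FNIEMOP_DEF(iemOp_cpuid)
 *     {
 *         IEMOP_MNEMONIC("cpuid");
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
 *     }
 */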
7888/**
7889 * Calls a FPU assembly implementation taking one visible argument.
7890 *
7891 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7892 * @param a0 The first extra argument.
7893 */
7894#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
7895 do { \
7896 iemFpuPrepareUsage(pIemCpu); \
7897 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0)); \
7898 } while (0)
7899
7900/**
7901 * Calls a FPU assembly implementation taking two visible arguments.
7902 *
7903 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7904 * @param a0 The first extra argument.
7905 * @param a1 The second extra argument.
7906 */
7907#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
7908 do { \
7909 iemFpuPrepareUsage(pIemCpu); \
7910 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
7911 } while (0)
7912
7913/**
7914 * Calls a FPU assembly implementation taking three visible arguments.
7915 *
7916 * @param a_pfnAImpl Pointer to the assembly FPU routine.
7917 * @param a0 The first extra argument.
7918 * @param a1 The second extra argument.
7919 * @param a2 The third extra argument.
7920 */
7921#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
7922 do { \
7923 iemFpuPrepareUsage(pIemCpu); \
7924 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
7925 } while (0)
7926
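/*
 * Rough usage sketch (variable and worker names illustrative): a register-only
 * FPU instruction typically prepares an IEMFPURESULT local, calls the assembly
 * worker through one of the macros above, and then commits the result via the
 * IEM_MC_STORE_FPU_RESULT macro defined below:
 *
 *     IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *     // ... fetch pr80Value1/pr80Value2 from the FPU stack ...
 *     IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *     IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 */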
7927#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
7928 do { \
7929 (a_FpuData).FSW = (a_FSW); \
7930 (a_FpuData).r80Result = *(a_pr80Value); \
7931 } while (0)
7932
7933/** Pushes FPU result onto the stack. */
7934#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
7935 iemFpuPushResult(pIemCpu, &a_FpuData)
7936/** Pushes FPU result onto the stack and sets the FPUDP. */
7937#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
7938 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
7939
7940/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
7941#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
7942 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
7943
7944/** Stores FPU result in a stack register. */
7945#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
7946 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
7947/** Stores FPU result in a stack register and pops the stack. */
7948#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
7949 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
7950/** Stores FPU result in a stack register and sets the FPUDP. */
7951#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7952 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
7953/** Stores FPU result in a stack register, sets the FPUDP, and pops the
7954 * stack. */
7955#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
7956 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
7957
7958/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
7959#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
7960 iemFpuUpdateOpcodeAndIp(pIemCpu)
7961/** Free a stack register (for FFREE and FFREEP). */
7962#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
7963 iemFpuStackFree(pIemCpu, a_iStReg)
7964/** Increment the FPU stack pointer. */
7965#define IEM_MC_FPU_STACK_INC_TOP() \
7966 iemFpuStackIncTop(pIemCpu)
7967/** Decrement the FPU stack pointer. */
7968#define IEM_MC_FPU_STACK_DEC_TOP() \
7969 iemFpuStackDecTop(pIemCpu)
7970
7971/** Updates the FSW, FOP, FPUIP, and FPUCS. */
7972#define IEM_MC_UPDATE_FSW(a_u16FSW) \
7973 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7974/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
7975#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
7976 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
7977/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
7978#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7979 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7980/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
7981#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
7982 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
7983/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
7984 * stack. */
7985#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
7986 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
7987/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
7988#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
7989 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
7990
7991/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
7992#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
7993 iemFpuStackUnderflow(pIemCpu, a_iStDst)
7994/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
7995 * stack. */
7996#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
7997 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
7998/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
7999 * FPUDS. */
8000#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8001 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8002/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
8003 * FPUDS. Pops stack. */
8004#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
8005 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
8006/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
8007 * stack twice. */
8008#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
8009 iemFpuStackUnderflowThenPopPop(pIemCpu)
8010/** Raises a FPU stack underflow exception for an instruction pushing a result
8011 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
8012#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
8013 iemFpuStackPushUnderflow(pIemCpu)
8014/** Raises a FPU stack underflow exception for an instruction pushing a result
8015 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
8016#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
8017 iemFpuStackPushUnderflowTwo(pIemCpu)
8018
8019/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8020 * FPUIP, FPUCS and FOP. */
8021#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
8022 iemFpuStackPushOverflow(pIemCpu)
8023/** Raises a FPU stack overflow exception as part of a push attempt. Sets
8024 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
8025#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
8026 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
8027/** Indicates that we (might) have modified the FPU state. */
8028#define IEM_MC_USED_FPU() \
8029 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
8030
8031/**
8032 * Calls a MMX assembly implementation taking two visible arguments.
8033 *
8034 * @param a_pfnAImpl Pointer to the assembly MMX routine.
8035 * @param a0 The first extra argument.
8036 * @param a1 The second extra argument.
8037 */
8038#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
8039 do { \
8040 iemFpuPrepareUsage(pIemCpu); \
8041 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
8042 } while (0)
8043
8044/**
8045 * Calls a MMX assembly implementation taking three visible arguments.
8046 *
8047 * @param a_pfnAImpl Pointer to the assembly MMX routine.
8048 * @param a0 The first extra argument.
8049 * @param a1 The second extra argument.
8050 * @param a2 The third extra argument.
8051 */
8052#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
8053 do { \
8054 iemFpuPrepareUsage(pIemCpu); \
8055 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
8056 } while (0)
8057
8058
8059/**
8060 * Calls a SSE assembly implementation taking two visible arguments.
8061 *
8062 * @param a_pfnAImpl Pointer to the assembly SSE routine.
8063 * @param a0 The first extra argument.
8064 * @param a1 The second extra argument.
8065 */
8066#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
8067 do { \
8068 iemFpuPrepareUsageSse(pIemCpu); \
8069 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
8070 } while (0)
8071
8072/**
8073 * Calls a SSE assembly implementation taking three visible arguments.
8074 *
8075 * @param a_pfnAImpl Pointer to the assembly SSE routine.
8076 * @param a0 The first extra argument.
8077 * @param a1 The second extra argument.
8078 * @param a2 The third extra argument.
8079 */
8080#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
8081 do { \
8082 iemFpuPrepareUsageSse(pIemCpu); \
8083 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \
8084 } while (0)
8085
8086
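/*
 * Usage sketch (hypothetical worker name): a two-operand SSE instruction body
 * fetches references to the destination and source XMM registers and then
 * invokes the worker, which receives the FPU/SSE state as its hidden first
 * argument:
 *
 *     IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_myXmmOp_u128, pDst, pSrc);
 */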
8087/** @note Not for IOPL or IF testing. */
8088#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
8089/** @note Not for IOPL or IF testing. */
8090#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
8091/** @note Not for IOPL or IF testing. */
8092#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
8093/** @note Not for IOPL or IF testing. */
8094#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
8095/** @note Not for IOPL or IF testing. */
8096#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
8097 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
8098 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
8099/** @note Not for IOPL or IF testing. */
8100#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
8101 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
8102 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
8103/** @note Not for IOPL or IF testing. */
8104#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
8105 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
8106 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
8107 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
8108/** @note Not for IOPL or IF testing. */
8109#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
8110 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
8111 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
8112 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
8113#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
8114#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
8115#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
8116/** @note Not for IOPL or IF testing. */
8117#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
8118 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
8119 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
8120/** @note Not for IOPL or IF testing. */
8121#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
8122 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
8123 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
8124/** @note Not for IOPL or IF testing. */
8125#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
8126 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
8127 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
8128/** @note Not for IOPL or IF testing. */
8129#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
8130 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
8131 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
8132/** @note Not for IOPL or IF testing. */
8133#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
8134 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
8135 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
8136/** @note Not for IOPL or IF testing. */
8137#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
8138 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
8139 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
8140#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
8141#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
8142#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
8143 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
8144#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
8145 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
8146#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
8147 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
8148#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
8149 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
8150#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
8151 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
8152#define IEM_MC_IF_FCW_IM() \
8153 if (pIemCpu->CTX_SUFF(pCtx)->fpu.FCW & X86_FCW_IM) {
8154
8155#define IEM_MC_ELSE() } else {
8156#define IEM_MC_ENDIF() } do {} while (0)
8157
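/*
 * Sketch of how the IF/ELSE/ENDIF macros compose (illustrative only): since each
 * IEM_MC_IF_* opens a brace and IEM_MC_ENDIF() closes it, a conditional jump
 * handler reads like structured C, e.g.:
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     } IEM_MC_ELSE() {
 *         IEM_MC_ADVANCE_RIP();
 *     } IEM_MC_ENDIF();
 */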
8158/** @} */
8159
8160
8161/** @name Opcode Debug Helpers.
8162 * @{
8163 */
8164#ifdef DEBUG
8165# define IEMOP_MNEMONIC(a_szMnemonic) \
8166 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
8167 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
8168# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
8169 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
8170 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
8171#else
8172# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
8173# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
8174#endif
8175
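/*
 * For reference, a Level 4 line produced by IEMOP_MNEMONIC looks roughly like
 * the following (all values made up):
 *
 *     decode - 0008:00000000801234ab lock xchg [#42]
 */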
8176/** @} */
8177
8178
8179/** @name Opcode Helpers.
8180 * @{
8181 */
8182
8183/** The instruction raises an \#UD in real and V8086 mode. */
8184#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
8185 do \
8186 { \
8187 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
8188 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
8189 } while (0)
8190
8191/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
8192 * lock prefixed.
8193 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
8194#define IEMOP_HLP_NO_LOCK_PREFIX() \
8195 do \
8196 { \
8197 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
8198 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
8199 } while (0)
8200
8201/** The instruction is not available in 64-bit mode, throw \#UD if we're in
8202 * 64-bit mode. */
8203#define IEMOP_HLP_NO_64BIT() \
8204 do \
8205 { \
8206 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
8207 return IEMOP_RAISE_INVALID_OPCODE(); \
8208 } while (0)
8209
8210/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
8211 * 64-bit mode. */
8212#define IEMOP_HLP_ONLY_64BIT() \
8213 do \
8214 { \
8215 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
8216 return IEMOP_RAISE_INVALID_OPCODE(); \
8217 } while (0)
8218
8219/** The instruction defaults to 64-bit operand size if 64-bit mode. */
8220#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
8221 do \
8222 { \
8223 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
8224 iemRecalEffOpSize64Default(pIemCpu); \
8225 } while (0)
8226
8227/** The instruction has 64-bit operand size if 64-bit mode. */
8228#define IEMOP_HLP_64BIT_OP_SIZE() \
8229 do \
8230 { \
8231 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
8232 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
8233 } while (0)
8234
8235/** Only a REX prefix immediately preceding the first opcode byte takes
8236 * effect. This macro helps ensure this as well as logging bad guest code. */
8237#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
8238 do \
8239 { \
8240 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
8241 { \
8242 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
8243 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
8244 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
8245 pIemCpu->uRexB = 0; \
8246 pIemCpu->uRexIndex = 0; \
8247 pIemCpu->uRexReg = 0; \
8248 iemRecalEffOpSize(pIemCpu); \
8249 } \
8250 } while (0)
8251
8252/**
8253 * Done decoding.
8254 */
8255#define IEMOP_HLP_DONE_DECODING() \
8256 do \
8257 { \
8258 /*nothing for now, maybe later... */ \
8259 } while (0)
8260
8261/**
8262 * Done decoding, raise \#UD exception if lock prefix present.
8263 */
8264#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
8265 do \
8266 { \
8267 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
8268 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
8269 } while (0)
8270#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
8271 do \
8272 { \
8273 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
8274 { \
8275 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
8276 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
8277 } \
8278 } while (0)
8279#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
8280 do \
8281 { \
8282 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
8283 { \
8284 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
8285 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
8286 } \
8287 } while (0)
8288
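/*
 * Informal example (hypothetical opcode handler): the IEMOP_HLP_* helpers above
 * are meant to be invoked right at the top of an opcode function, before any
 * operand fetching, e.g.:
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         IEMOP_MNEMONIC("example");
 *         IEMOP_HLP_NO_LOCK_PREFIX();         // reject a LOCK prefix with #UD
 *         IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();  // e.g. pushes/pops default to 64-bit operands
 *         // ... decode operands and emit the IEM_MC block ...
 *     }
 */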
8289
8290/**
8291 * Calculates the effective address of a ModR/M memory operand.
8292 *
8293 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8294 *
8295 * @returns Strict VBox status code.
8296 * @param pIemCpu The IEM per CPU data.
8297 * @param bRm The ModRM byte.
8298 * @param cbImm The size of any immediate following the
8299 * effective address opcode bytes. Important for
8300 * RIP relative addressing.
8301 * @param pGCPtrEff Where to return the effective address.
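 *
 * @remarks Informal worked example: with 16-bit addressing, bRm=0x46 (mod=01,
 *          rm=110) followed by a disp8 of 0x10 decodes to GCPtrEff = BP + 0x10,
 *          with SS becoming the default segment (see SET_SS_DEF below).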
8302 */
8303static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
8304{
8305 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8306 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8307#define SET_SS_DEF() \
8308 do \
8309 { \
8310 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8311 pIemCpu->iEffSeg = X86_SREG_SS; \
8312 } while (0)
8313
8314 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
8315 {
8316/** @todo Check the effective address size crap! */
8317 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
8318 {
8319 uint16_t u16EffAddr;
8320
8321 /* Handle the disp16 form with no registers first. */
8322 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8323 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8324 else
8325 {
8326 /* Get the displacement. */
8327 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8328 {
8329 case 0: u16EffAddr = 0; break;
8330 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8331 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8332 default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
8333 }
8334
8335 /* Add the base and index registers to the disp. */
8336 switch (bRm & X86_MODRM_RM_MASK)
8337 {
8338 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
8339 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
8340 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
8341 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
8342 case 4: u16EffAddr += pCtx->si; break;
8343 case 5: u16EffAddr += pCtx->di; break;
8344 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
8345 case 7: u16EffAddr += pCtx->bx; break;
8346 }
8347 }
8348
8349 *pGCPtrEff = u16EffAddr;
8350 }
8351 else
8352 {
8353 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
8354 uint32_t u32EffAddr;
8355
8356 /* Handle the disp32 form with no registers first. */
8357 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8358 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8359 else
8360 {
8361 /* Get the register (or SIB) value. */
8362 switch ((bRm & X86_MODRM_RM_MASK))
8363 {
8364 case 0: u32EffAddr = pCtx->eax; break;
8365 case 1: u32EffAddr = pCtx->ecx; break;
8366 case 2: u32EffAddr = pCtx->edx; break;
8367 case 3: u32EffAddr = pCtx->ebx; break;
8368 case 4: /* SIB */
8369 {
8370 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8371
8372 /* Get the index and scale it. */
8373 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8374 {
8375 case 0: u32EffAddr = pCtx->eax; break;
8376 case 1: u32EffAddr = pCtx->ecx; break;
8377 case 2: u32EffAddr = pCtx->edx; break;
8378 case 3: u32EffAddr = pCtx->ebx; break;
8379 case 4: u32EffAddr = 0; /*none */ break;
8380 case 5: u32EffAddr = pCtx->ebp; break;
8381 case 6: u32EffAddr = pCtx->esi; break;
8382 case 7: u32EffAddr = pCtx->edi; break;
8383 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8384 }
8385 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8386
8387 /* add base */
8388 switch (bSib & X86_SIB_BASE_MASK)
8389 {
8390 case 0: u32EffAddr += pCtx->eax; break;
8391 case 1: u32EffAddr += pCtx->ecx; break;
8392 case 2: u32EffAddr += pCtx->edx; break;
8393 case 3: u32EffAddr += pCtx->ebx; break;
8394 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
8395 case 5:
8396 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8397 {
8398 u32EffAddr += pCtx->ebp;
8399 SET_SS_DEF();
8400 }
8401 else
8402 {
8403 uint32_t u32Disp;
8404 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8405 u32EffAddr += u32Disp;
8406 }
8407 break;
8408 case 6: u32EffAddr += pCtx->esi; break;
8409 case 7: u32EffAddr += pCtx->edi; break;
8410 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8411 }
8412 break;
8413 }
8414 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
8415 case 6: u32EffAddr = pCtx->esi; break;
8416 case 7: u32EffAddr = pCtx->edi; break;
8417 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8418 }
8419
8420 /* Get and add the displacement. */
8421 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8422 {
8423 case 0:
8424 break;
8425 case 1:
8426 {
8427 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8428 u32EffAddr += i8Disp;
8429 break;
8430 }
8431 case 2:
8432 {
8433 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8434 u32EffAddr += u32Disp;
8435 break;
8436 }
8437 default:
8438 AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
8439 }
8440
8441 }
8442 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
8443 *pGCPtrEff = u32EffAddr;
8444 else
8445 {
8446 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
8447 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8448 }
8449 }
8450 }
8451 else
8452 {
8453 uint64_t u64EffAddr;
8454
8455 /* Handle the rip+disp32 form with no registers first. */
8456 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8457 {
8458 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8459 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
8460 }
8461 else
8462 {
8463 /* Get the register (or SIB) value. */
8464 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
8465 {
8466 case 0: u64EffAddr = pCtx->rax; break;
8467 case 1: u64EffAddr = pCtx->rcx; break;
8468 case 2: u64EffAddr = pCtx->rdx; break;
8469 case 3: u64EffAddr = pCtx->rbx; break;
8470 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
8471 case 6: u64EffAddr = pCtx->rsi; break;
8472 case 7: u64EffAddr = pCtx->rdi; break;
8473 case 8: u64EffAddr = pCtx->r8; break;
8474 case 9: u64EffAddr = pCtx->r9; break;
8475 case 10: u64EffAddr = pCtx->r10; break;
8476 case 11: u64EffAddr = pCtx->r11; break;
8477 case 13: u64EffAddr = pCtx->r13; break;
8478 case 14: u64EffAddr = pCtx->r14; break;
8479 case 15: u64EffAddr = pCtx->r15; break;
8480 /* SIB */
8481 case 4:
8482 case 12:
8483 {
8484 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8485
8486 /* Get the index and scale it. */
8487 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
8488 {
8489 case 0: u64EffAddr = pCtx->rax; break;
8490 case 1: u64EffAddr = pCtx->rcx; break;
8491 case 2: u64EffAddr = pCtx->rdx; break;
8492 case 3: u64EffAddr = pCtx->rbx; break;
8493 case 4: u64EffAddr = 0; /*none */ break;
8494 case 5: u64EffAddr = pCtx->rbp; break;
8495 case 6: u64EffAddr = pCtx->rsi; break;
8496 case 7: u64EffAddr = pCtx->rdi; break;
8497 case 8: u64EffAddr = pCtx->r8; break;
8498 case 9: u64EffAddr = pCtx->r9; break;
8499 case 10: u64EffAddr = pCtx->r10; break;
8500 case 11: u64EffAddr = pCtx->r11; break;
8501 case 12: u64EffAddr = pCtx->r12; break;
8502 case 13: u64EffAddr = pCtx->r13; break;
8503 case 14: u64EffAddr = pCtx->r14; break;
8504 case 15: u64EffAddr = pCtx->r15; break;
8505 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8506 }
8507 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8508
8509 /* add base */
8510 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
8511 {
8512 case 0: u64EffAddr += pCtx->rax; break;
8513 case 1: u64EffAddr += pCtx->rcx; break;
8514 case 2: u64EffAddr += pCtx->rdx; break;
8515 case 3: u64EffAddr += pCtx->rbx; break;
8516 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
8517 case 6: u64EffAddr += pCtx->rsi; break;
8518 case 7: u64EffAddr += pCtx->rdi; break;
8519 case 8: u64EffAddr += pCtx->r8; break;
8520 case 9: u64EffAddr += pCtx->r9; break;
8521 case 10: u64EffAddr += pCtx->r10; break;
8522 case 11: u64EffAddr += pCtx->r11; break;
8523 case 12: u64EffAddr += pCtx->r12; break;
8524 case 14: u64EffAddr += pCtx->r14; break;
8525 case 15: u64EffAddr += pCtx->r15; break;
8526 /* complicated encodings */
8527 case 5:
8528 case 13:
8529 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8530 {
8531 if (!pIemCpu->uRexB)
8532 {
8533 u64EffAddr += pCtx->rbp;
8534 SET_SS_DEF();
8535 }
8536 else
8537 u64EffAddr += pCtx->r13;
8538 }
8539 else
8540 {
8541 uint32_t u32Disp;
8542 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8543 u64EffAddr += (int32_t)u32Disp;
8544 }
8545 break;
8546 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8547 }
8548 break;
8549 }
8550 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8551 }
8552
8553 /* Get and add the displacement. */
8554 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8555 {
8556 case 0:
8557 break;
8558 case 1:
8559 {
8560 int8_t i8Disp;
8561 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8562 u64EffAddr += i8Disp;
8563 break;
8564 }
8565 case 2:
8566 {
8567 uint32_t u32Disp;
8568 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8569 u64EffAddr += (int32_t)u32Disp;
8570 break;
8571 }
8572 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8573 }
8574
8575 }
8576
8577 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
8578 *pGCPtrEff = u64EffAddr;
8579 else
8580 {
8581 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
8582 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8583 }
8584 }
8585
8586 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8587 return VINF_SUCCESS;
8588}
8589
8590/** @} */
8591
8592
8593
8594/*
8595 * Include the instructions
8596 */
8597#include "IEMAllInstructions.cpp.h"
8598
8599
8600
8601
8602#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8603
8604/**
8605 * Sets up execution verification mode.
8606 */
8607static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
8608{
8609 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
8610 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
8611
8612 /*
8613 * Always note down the address of the current instruction.
8614 */
8615 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
8616 pIemCpu->uOldRip = pOrgCtx->rip;
8617
8618 /*
8619 * Enable verification and/or logging.
8620 */
8621 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
8622 if ( fNewNoRem
8623 && ( 0
8624#if 0 /* auto enable on first paged protected mode interrupt */
8625 || ( pOrgCtx->eflags.Bits.u1IF
8626 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
8627 && TRPMHasTrap(pVCpu)
8628 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
8629#endif
8630#if 0
8631 || ( pOrgCtx->cs.Sel == 0x10
8632 && ( pOrgCtx->rip == 0x90119e3e
8633 || pOrgCtx->rip == 0x901d9810) )
8634#endif
8635#if 0 /* Auto enable DSL - FPU stuff. */
8636 || ( pOrgCtx->cs.Sel == 0x10
8637 && (// pOrgCtx->rip == 0xc02ec07f
8638 //|| pOrgCtx->rip == 0xc02ec082
8639 //|| pOrgCtx->rip == 0xc02ec0c9
8640 0
8641 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
8642#endif
8643#if 0 /* Auto enable DSL - fstp st0 stuff. */
8644 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
8645#endif
8646#if 0
8647 || pOrgCtx->rip == 0x9022bb3a
8648#endif
8649#if 0
8650 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
8651#endif
8652#if 0
8653 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
8654 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
8655#endif
8656#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
8657 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
8658 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
8659 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
8660#endif
8661#if 0 /* NT4SP1 - xadd early boot. */
8662 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
8663#endif
8664#if 0 /* NT4SP1 - wrmsr (intel MSR). */
8665 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
8666#endif
8667#if 0 /* NT4SP1 - cmpxchg (AMD). */
8668 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
8669#endif
8670#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
8671 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
8672#endif
8673#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
8674 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
8675
8676#endif
8677#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
8678 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
8679
8680#endif
8681#if 0 /* NT4SP1 - frstor [ecx] */
8682 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
8683#endif
8684#if 0 /* xxxxxx - All long mode code. */
8685 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
8686#endif
8687#if 0 /* rep movsq linux 3.7 64-bit boot. */
8688 || (pOrgCtx->rip == 0x0000000000100241)
8689#endif
8690#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
8691 || (pOrgCtx->rip == 0x000000000215e240)
8692#endif
8693#if 0 /* DOS's size-overridden iret to v8086. */
8694 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
8695#endif
8696 )
8697 )
8698 {
8699 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
8700 RTLogFlags(NULL, "enabled");
8701 fNewNoRem = false;
8702 }
8703 if (fNewNoRem != pIemCpu->fNoRem)
8704 {
8705 pIemCpu->fNoRem = fNewNoRem;
8706 if (!fNewNoRem)
8707 {
8708 LogAlways(("Enabling verification mode!\n"));
8709 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
8710 }
8711 else
8712 LogAlways(("Disabling verification mode!\n"));
8713 }
8714
8715 /*
8716 * Switch state.
8717 */
8718 if (IEM_VERIFICATION_ENABLED(pIemCpu))
8719 {
8720 static CPUMCTX s_DebugCtx; /* Ugly! */
8721
8722 s_DebugCtx = *pOrgCtx;
8723 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
8724 }
8725
8726 /*
8727 * See if there is an interrupt pending in TRPM and inject it if we can.
8728 */
8729 pIemCpu->uInjectCpl = UINT8_MAX;
8730 if ( pOrgCtx->eflags.Bits.u1IF
8731 && TRPMHasTrap(pVCpu)
8732 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
8733 {
8734 uint8_t u8TrapNo;
8735 TRPMEVENT enmType;
8736 RTGCUINT uErrCode;
8737 RTGCPTR uCr2;
8738 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
8739 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
8740 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8741 TRPMResetTrap(pVCpu);
8742 pIemCpu->uInjectCpl = pIemCpu->uCpl;
8743 }
8744
8745 /*
8746 * Reset the counters.
8747 */
8748 pIemCpu->cIOReads = 0;
8749 pIemCpu->cIOWrites = 0;
8750 pIemCpu->fIgnoreRaxRdx = false;
8751 pIemCpu->fOverlappingMovs = false;
8752 pIemCpu->fProblematicMemory = false;
8753 pIemCpu->fUndefinedEFlags = 0;
8754
8755 if (IEM_VERIFICATION_ENABLED(pIemCpu))
8756 {
8757 /*
8758 * Free all verification records.
8759 */
8760 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
8761 pIemCpu->pIemEvtRecHead = NULL;
8762 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
8763 do
8764 {
8765 while (pEvtRec)
8766 {
8767 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
8768 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
8769 pIemCpu->pFreeEvtRec = pEvtRec;
8770 pEvtRec = pNext;
8771 }
8772 pEvtRec = pIemCpu->pOtherEvtRecHead;
8773 pIemCpu->pOtherEvtRecHead = NULL;
8774 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
8775 } while (pEvtRec);
8776 }
8777}
8778
8779
8780/**
8781 * Allocate an event record.
8782 * @returns Pointer to a record.
8783 */
8784static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
8785{
8786 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
8787 return NULL;
8788
8789 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
8790 if (pEvtRec)
8791 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
8792 else
8793 {
8794 if (!pIemCpu->ppIemEvtRecNext)
8795 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
8796
8797 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
8798 if (!pEvtRec)
8799 return NULL;
8800 }
8801 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
8802 pEvtRec->pNext = NULL;
8803 return pEvtRec;
8804}
8805
8806
8807/**
8808 * IOMMMIORead notification.
8809 */
8810VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
8811{
8812 PVMCPU pVCpu = VMMGetCpu(pVM);
8813 if (!pVCpu)
8814 return;
8815 PIEMCPU pIemCpu = &pVCpu->iem.s;
8816 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8817 if (!pEvtRec)
8818 return;
8819 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8820 pEvtRec->u.RamRead.GCPhys = GCPhys;
8821 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
8822 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8823 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8824}
8825
8826
8827/**
8828 * IOMMMIOWrite notification.
8829 */
8830VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
8831{
8832 PVMCPU pVCpu = VMMGetCpu(pVM);
8833 if (!pVCpu)
8834 return;
8835 PIEMCPU pIemCpu = &pVCpu->iem.s;
8836 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8837 if (!pEvtRec)
8838 return;
8839 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8840 pEvtRec->u.RamWrite.GCPhys = GCPhys;
8841 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
8842 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
8843 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
8844 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
8845 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
8846 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8847 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8848}
8849
8850
8851/**
8852 * IOMIOPortRead notification.
8853 */
8854VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
8855{
8856 PVMCPU pVCpu = VMMGetCpu(pVM);
8857 if (!pVCpu)
8858 return;
8859 PIEMCPU pIemCpu = &pVCpu->iem.s;
8860 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8861 if (!pEvtRec)
8862 return;
8863 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
8864 pEvtRec->u.IOPortRead.Port = Port;
8865 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
8866 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8867 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8868}
8869
8870/**
8871 * IOMIOPortWrite notification.
8872 */
8873VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8874{
8875 PVMCPU pVCpu = VMMGetCpu(pVM);
8876 if (!pVCpu)
8877 return;
8878 PIEMCPU pIemCpu = &pVCpu->iem.s;
8879 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8880 if (!pEvtRec)
8881 return;
8882 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8883 pEvtRec->u.IOPortWrite.Port = Port;
8884 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8885 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8886 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
8887 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
8888}
8889
8890
8891VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
8892{
8893 AssertFailed();
8894}
8895
8896
8897VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
8898{
8899 AssertFailed();
8900}
8901
8902
8903/**
8904 * Fakes and records an I/O port read.
8905 *
8906 * @returns VINF_SUCCESS.
8907 * @param pIemCpu The IEM per CPU data.
8908 * @param Port The I/O port.
8909 * @param pu32Value Where to store the fake value.
8910 * @param cbValue The size of the access.
8911 */
8912static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
8913{
8914 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8915 if (pEvtRec)
8916 {
8917 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
8918 pEvtRec->u.IOPortRead.Port = Port;
8919 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
8920 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8921 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8922 }
8923 pIemCpu->cIOReads++;
8924 *pu32Value = 0xcccccccc;
8925 return VINF_SUCCESS;
8926}
8927
8928
8929/**
8930 * Fakes and records an I/O port write.
8931 *
8932 * @returns VINF_SUCCESS.
8933 * @param pIemCpu The IEM per CPU data.
8934 * @param Port The I/O port.
8935 * @param u32Value The value being written.
8936 * @param cbValue The size of the access.
8937 */
8938static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
8939{
8940 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
8941 if (pEvtRec)
8942 {
8943 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
8944 pEvtRec->u.IOPortWrite.Port = Port;
8945 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
8946 pEvtRec->u.IOPortWrite.u32Value = u32Value;
8947 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
8948 *pIemCpu->ppIemEvtRecNext = pEvtRec;
8949 }
8950 pIemCpu->cIOWrites++;
8951 return VINF_SUCCESS;
8952}
8953
8954
8955/**
8956 * Used to add extra details (register state and disassembly) to an assertion message.
8957 * @param pIemCpu The IEM per CPU state.
8958 */
8959static void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
8960{
8961 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8962 PVM pVM = IEMCPU_TO_VM(pIemCpu);
8963 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
8964 char szRegs[4096];
8965 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
8966 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
8967 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
8968 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
8969 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
8970 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
8971 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
8972 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
8973 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
8974 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
8975 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
8976 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
8977 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
8978 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
8979 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
8980 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
8981 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
8982 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
8983 " efer=%016VR{efer}\n"
8984 " pat=%016VR{pat}\n"
8985 " sf_mask=%016VR{sf_mask}\n"
8986 "krnl_gs_base=%016VR{krnl_gs_base}\n"
8987 " lstar=%016VR{lstar}\n"
8988 " star=%016VR{star} cstar=%016VR{cstar}\n"
8989 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
8990 );
8991
8992 char szInstr1[256];
8993 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
8994 DBGF_DISAS_FLAGS_DEFAULT_MODE,
8995 szInstr1, sizeof(szInstr1), NULL);
8996 char szInstr2[256];
8997 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
8998 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8999 szInstr2, sizeof(szInstr2), NULL);
9000
9001 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
9002}
9003
9004
9005/**
9006 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
9007 * dump to the assertion info.
9008 *
9009 * @param pEvtRec The record to dump.
9010 */
9011static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
9012{
9013 switch (pEvtRec->enmEvent)
9014 {
9015 case IEMVERIFYEVENT_IOPORT_READ:
9016 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
9017 pEvtRec->u.IOPortRead.Port,
9018 pEvtRec->u.IOPortRead.cbValue);
9019 break;
9020 case IEMVERIFYEVENT_IOPORT_WRITE:
9021 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
9022 pEvtRec->u.IOPortWrite.Port,
9023 pEvtRec->u.IOPortWrite.cbValue,
9024 pEvtRec->u.IOPortWrite.u32Value);
9025 break;
9026 case IEMVERIFYEVENT_RAM_READ:
9027 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
9028 pEvtRec->u.RamRead.GCPhys,
9029 pEvtRec->u.RamRead.cb);
9030 break;
9031 case IEMVERIFYEVENT_RAM_WRITE:
9032 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
9033 pEvtRec->u.RamWrite.GCPhys,
9034 pEvtRec->u.RamWrite.cb,
9035 (int)pEvtRec->u.RamWrite.cb,
9036 pEvtRec->u.RamWrite.ab);
9037 break;
9038 default:
9039 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
9040 break;
9041 }
9042}
9043
9044
9045/**
9046 * Raises an assertion on the specified records, showing the given message with
9047 * a record dump attached.
9048 *
9049 * @param pIemCpu The IEM per CPU data.
9050 * @param pEvtRec1 The first record.
9051 * @param pEvtRec2 The second record.
9052 * @param pszMsg The message explaining why we're asserting.
9053 */
9054static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
9055{
9056 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
9057 iemVerifyAssertAddRecordDump(pEvtRec1);
9058 iemVerifyAssertAddRecordDump(pEvtRec2);
9059 iemVerifyAssertMsg2(pIemCpu);
9060 RTAssertPanic();
9061}
9062
9063
9064/**
9065 * Raises an assertion on the specified record, showing the given message with
9066 * a record dump attached.
9067 *
9068 * @param pIemCpu The IEM per CPU data.
9069 * @param pEvtRec The record to dump.
9070 * @param pszMsg The message explaining why we're asserting.
9071 */
9072static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
9073{
9074 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
9075 iemVerifyAssertAddRecordDump(pEvtRec);
9076 iemVerifyAssertMsg2(pIemCpu);
9077 RTAssertPanic();
9078}
9079
9080
9081/**
9082 * Verifies a write record.
9083 *
9084 * @param pIemCpu The IEM per CPU data.
9085 * @param pEvtRec The write record.
9086 * @param fRem Set if REM was doing the other execution. If clear
9087 * it was HM.
9088 */
9089static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
9090{
9091 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
9092 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
9093 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
9094 if ( RT_FAILURE(rc)
9095 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
9096 {
9097 /* fend off ins */
9098 if ( !pIemCpu->cIOReads
9099 || pEvtRec->u.RamWrite.ab[0] != 0xcc
9100 || ( pEvtRec->u.RamWrite.cb != 1
9101 && pEvtRec->u.RamWrite.cb != 2
9102 && pEvtRec->u.RamWrite.cb != 4) )
9103 {
9104 /* fend off ROMs and MMIO */
9105 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
9106 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
9107 {
9108 /* fend off fxsave */
9109 if (pEvtRec->u.RamWrite.cb != 512)
9110 {
9111 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
9112 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
9113 RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
9114 RTAssertMsg2Add("%s: %.*Rhxs\n"
9115 "iem: %.*Rhxs\n",
9116 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
9117 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
9118 iemVerifyAssertAddRecordDump(pEvtRec);
9119 iemVerifyAssertMsg2(pIemCpu);
9120 RTAssertPanic();
9121 }
9122 }
9123 }
9124 }
9125
9126}
9127
9128/**
9129 * Performs the post-execution verification checks.
9130 */
9131static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
9132{
9133 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9134 return;
9135
9136 /*
9137 * Switch back the state.
9138 */
9139 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
9140 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
9141 Assert(pOrgCtx != pDebugCtx);
9142 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
9143
9144 /*
9145 * Execute the instruction in REM.
9146 */
9147 bool fRem = false;
9148 PVM pVM = IEMCPU_TO_VM(pIemCpu);
9149 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9150 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
9151#ifdef IEM_VERIFICATION_MODE_FULL_HM
9152 if ( HMIsEnabled(pVM)
9153 && pIemCpu->cIOReads == 0
9154 && pIemCpu->cIOWrites == 0
9155 && !pIemCpu->fProblematicMemory)
9156 {
9157 unsigned iLoops = 0;
9158 do
9159 {
9160 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
9161 iLoops++;
9162 } while ( rc == VINF_SUCCESS
9163 || ( rc == VINF_EM_DBG_STEPPED
9164 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9165 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
9166 || ( pOrgCtx->rip != pDebugCtx->rip
9167 && pIemCpu->uInjectCpl != UINT8_MAX
9168 && iLoops < 8) );
9169 }
9170#endif
9171 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
9172 || rc == VINF_IOM_R3_IOPORT_READ
9173 || rc == VINF_IOM_R3_IOPORT_WRITE
9174 || rc == VINF_IOM_R3_MMIO_READ
9175 || rc == VINF_IOM_R3_MMIO_READ_WRITE
9176 || rc == VINF_IOM_R3_MMIO_WRITE
9177 )
9178 {
9179 EMRemLock(pVM);
9180 rc = REMR3EmulateInstruction(pVM, pVCpu);
9181 AssertRC(rc);
9182 EMRemUnlock(pVM);
9183 fRem = true;
9184 }
9185
9186 /*
9187 * Compare the register states.
9188 */
9189 unsigned cDiffs = 0;
9190 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
9191 {
9192 //Log(("REM and IEM ends up with different registers!\n"));
9193 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
9194
9195# define CHECK_FIELD(a_Field) \
9196 do \
9197 { \
9198 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
9199 { \
9200 switch (sizeof(pOrgCtx->a_Field)) \
9201 { \
9202 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
9203 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
9204 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
9205 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
9206 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
9207 } \
9208 cDiffs++; \
9209 } \
9210 } while (0)
9211
9212# define CHECK_BIT_FIELD(a_Field) \
9213 do \
9214 { \
9215 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
9216 { \
9217 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
9218 cDiffs++; \
9219 } \
9220 } while (0)
9221
9222# define CHECK_SEL(a_Sel) \
9223 do \
9224 { \
9225 CHECK_FIELD(a_Sel.Sel); \
9226 CHECK_FIELD(a_Sel.Attr.u); \
9227 CHECK_FIELD(a_Sel.u64Base); \
9228 CHECK_FIELD(a_Sel.u32Limit); \
9229 CHECK_FIELD(a_Sel.fFlags); \
9230 } while (0)
9231
9232#if 1 /* The recompiler doesn't update these the intel way. */
9233 if (fRem)
9234 {
9235 pOrgCtx->fpu.FOP = pDebugCtx->fpu.FOP;
9236 pOrgCtx->fpu.FPUIP = pDebugCtx->fpu.FPUIP;
9237 pOrgCtx->fpu.CS = pDebugCtx->fpu.CS;
9238 pOrgCtx->fpu.Rsrvd1 = pDebugCtx->fpu.Rsrvd1;
9239 pOrgCtx->fpu.FPUDP = pDebugCtx->fpu.FPUDP;
9240 pOrgCtx->fpu.DS = pDebugCtx->fpu.DS;
9241 pOrgCtx->fpu.Rsrvd2 = pDebugCtx->fpu.Rsrvd2;
9242 //pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK;
9243 if ((pOrgCtx->fpu.FSW & X86_FSW_TOP_MASK) == (pDebugCtx->fpu.FSW & X86_FSW_TOP_MASK))
9244 pOrgCtx->fpu.FSW = pDebugCtx->fpu.FSW;
9245 }
9246#endif
9247 if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
9248 {
9249 RTAssertMsg2Weak(" the FPU state differs\n");
9250 cDiffs++;
9251 CHECK_FIELD(fpu.FCW);
9252 CHECK_FIELD(fpu.FSW);
9253 CHECK_FIELD(fpu.FTW);
9254 CHECK_FIELD(fpu.FOP);
9255 CHECK_FIELD(fpu.FPUIP);
9256 CHECK_FIELD(fpu.CS);
9257 CHECK_FIELD(fpu.Rsrvd1);
9258 CHECK_FIELD(fpu.FPUDP);
9259 CHECK_FIELD(fpu.DS);
9260 CHECK_FIELD(fpu.Rsrvd2);
9261 CHECK_FIELD(fpu.MXCSR);
9262 CHECK_FIELD(fpu.MXCSR_MASK);
9263 CHECK_FIELD(fpu.aRegs[0].au64[0]); CHECK_FIELD(fpu.aRegs[0].au64[1]);
9264 CHECK_FIELD(fpu.aRegs[1].au64[0]); CHECK_FIELD(fpu.aRegs[1].au64[1]);
9265 CHECK_FIELD(fpu.aRegs[2].au64[0]); CHECK_FIELD(fpu.aRegs[2].au64[1]);
9266 CHECK_FIELD(fpu.aRegs[3].au64[0]); CHECK_FIELD(fpu.aRegs[3].au64[1]);
9267 CHECK_FIELD(fpu.aRegs[4].au64[0]); CHECK_FIELD(fpu.aRegs[4].au64[1]);
9268 CHECK_FIELD(fpu.aRegs[5].au64[0]); CHECK_FIELD(fpu.aRegs[5].au64[1]);
9269 CHECK_FIELD(fpu.aRegs[6].au64[0]); CHECK_FIELD(fpu.aRegs[6].au64[1]);
9270 CHECK_FIELD(fpu.aRegs[7].au64[0]); CHECK_FIELD(fpu.aRegs[7].au64[1]);
9271 CHECK_FIELD(fpu.aXMM[ 0].au64[0]); CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
9272 CHECK_FIELD(fpu.aXMM[ 1].au64[0]); CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
9273 CHECK_FIELD(fpu.aXMM[ 2].au64[0]); CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
9274 CHECK_FIELD(fpu.aXMM[ 3].au64[0]); CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
9275 CHECK_FIELD(fpu.aXMM[ 4].au64[0]); CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
9276 CHECK_FIELD(fpu.aXMM[ 5].au64[0]); CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
9277 CHECK_FIELD(fpu.aXMM[ 6].au64[0]); CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
9278 CHECK_FIELD(fpu.aXMM[ 7].au64[0]); CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
9279 CHECK_FIELD(fpu.aXMM[ 8].au64[0]); CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
9280 CHECK_FIELD(fpu.aXMM[ 9].au64[0]); CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
9281 CHECK_FIELD(fpu.aXMM[10].au64[0]); CHECK_FIELD(fpu.aXMM[10].au64[1]);
9282 CHECK_FIELD(fpu.aXMM[11].au64[0]); CHECK_FIELD(fpu.aXMM[11].au64[1]);
9283 CHECK_FIELD(fpu.aXMM[12].au64[0]); CHECK_FIELD(fpu.aXMM[12].au64[1]);
9284 CHECK_FIELD(fpu.aXMM[13].au64[0]); CHECK_FIELD(fpu.aXMM[13].au64[1]);
9285 CHECK_FIELD(fpu.aXMM[14].au64[0]); CHECK_FIELD(fpu.aXMM[14].au64[1]);
9286 CHECK_FIELD(fpu.aXMM[15].au64[0]); CHECK_FIELD(fpu.aXMM[15].au64[1]);
9287 for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
9288 CHECK_FIELD(fpu.au32RsrvdRest[i]);
9289 }
9290 CHECK_FIELD(rip);
9291 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
9292 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
9293 {
9294 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
9295 CHECK_BIT_FIELD(rflags.Bits.u1CF);
9296 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
9297 CHECK_BIT_FIELD(rflags.Bits.u1PF);
9298 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
9299 CHECK_BIT_FIELD(rflags.Bits.u1AF);
9300 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
9301 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
9302 CHECK_BIT_FIELD(rflags.Bits.u1SF);
9303 CHECK_BIT_FIELD(rflags.Bits.u1TF);
9304 CHECK_BIT_FIELD(rflags.Bits.u1IF);
9305 CHECK_BIT_FIELD(rflags.Bits.u1DF);
9306 CHECK_BIT_FIELD(rflags.Bits.u1OF);
9307 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
9308 CHECK_BIT_FIELD(rflags.Bits.u1NT);
9309 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
9310 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
9311 CHECK_BIT_FIELD(rflags.Bits.u1RF);
9312 CHECK_BIT_FIELD(rflags.Bits.u1VM);
9313 CHECK_BIT_FIELD(rflags.Bits.u1AC);
9314 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
9315 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
9316 CHECK_BIT_FIELD(rflags.Bits.u1ID);
9317 }
9318
9319 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
9320 CHECK_FIELD(rax);
9321 CHECK_FIELD(rcx);
9322 if (!pIemCpu->fIgnoreRaxRdx)
9323 CHECK_FIELD(rdx);
9324 CHECK_FIELD(rbx);
9325 CHECK_FIELD(rsp);
9326 CHECK_FIELD(rbp);
9327 CHECK_FIELD(rsi);
9328 CHECK_FIELD(rdi);
9329 CHECK_FIELD(r8);
9330 CHECK_FIELD(r9);
9331 CHECK_FIELD(r10);
9332 CHECK_FIELD(r11);
9333 CHECK_FIELD(r12);
9334 CHECK_FIELD(r13);
 CHECK_FIELD(r14);
 CHECK_FIELD(r15);
9335 CHECK_SEL(cs);
9336 CHECK_SEL(ss);
9337 CHECK_SEL(ds);
9338 CHECK_SEL(es);
9339 CHECK_SEL(fs);
9340 CHECK_SEL(gs);
9341 CHECK_FIELD(cr0);
9342
9343 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
9344 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
9345 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
9346 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
9347 if (pOrgCtx->cr2 != pDebugCtx->cr2)
9348 {
9349 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
9350 { /* ignore */ }
9351 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
9352 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
9353 && fRem)
9354 { /* ignore */ }
9355 else
9356 CHECK_FIELD(cr2);
9357 }
9358 CHECK_FIELD(cr3);
9359 CHECK_FIELD(cr4);
9360 CHECK_FIELD(dr[0]);
9361 CHECK_FIELD(dr[1]);
9362 CHECK_FIELD(dr[2]);
9363 CHECK_FIELD(dr[3]);
9364 CHECK_FIELD(dr[6]);
9365 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
9366 CHECK_FIELD(dr[7]);
9367 CHECK_FIELD(gdtr.cbGdt);
9368 CHECK_FIELD(gdtr.pGdt);
9369 CHECK_FIELD(idtr.cbIdt);
9370 CHECK_FIELD(idtr.pIdt);
9371 CHECK_SEL(ldtr);
9372 CHECK_SEL(tr);
9373 CHECK_FIELD(SysEnter.cs);
9374 CHECK_FIELD(SysEnter.eip);
9375 CHECK_FIELD(SysEnter.esp);
9376 CHECK_FIELD(msrEFER);
9377 CHECK_FIELD(msrSTAR);
9378 CHECK_FIELD(msrPAT);
9379 CHECK_FIELD(msrLSTAR);
9380 CHECK_FIELD(msrCSTAR);
9381 CHECK_FIELD(msrSFMASK);
9382 CHECK_FIELD(msrKERNELGSBASE);
9383
9384 if (cDiffs != 0)
9385 {
9386 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
9387 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
9388 iemVerifyAssertMsg2(pIemCpu);
9389 RTAssertPanic();
9390 }
9391# undef CHECK_FIELD
9392# undef CHECK_BIT_FIELD
9393 }
9394
9395 /*
9396 * If the register state compared fine, check the verification event
9397 * records.
9398 */
9399 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
9400 {
9401 /*
9402 * Compare verification event records.
9403 * - I/O port accesses should be a 1:1 match.
9404 */
9405 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
9406 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
9407 while (pIemRec && pOtherRec)
9408 {
9409 /* Since we might miss RAM writes and reads, ignore reads and verify
9410 any extra IEM write records against the actual memory contents. */
9411 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
9412 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
9413 && pIemRec->pNext)
9414 {
9415 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
9416 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
9417 pIemRec = pIemRec->pNext;
9418 }
9419
9420 /* Do the compare. */
9421 if (pIemRec->enmEvent != pOtherRec->enmEvent)
9422 {
9423 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
9424 break;
9425 }
9426 bool fEquals;
9427 switch (pIemRec->enmEvent)
9428 {
9429 case IEMVERIFYEVENT_IOPORT_READ:
9430 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
9431 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
9432 break;
9433 case IEMVERIFYEVENT_IOPORT_WRITE:
9434 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
9435 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
9436 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
9437 break;
9438 case IEMVERIFYEVENT_RAM_READ:
9439 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
9440 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
9441 break;
9442 case IEMVERIFYEVENT_RAM_WRITE:
9443 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
9444 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
9445 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
9446 break;
9447 default:
9448 fEquals = false;
9449 break;
9450 }
9451 if (!fEquals)
9452 {
9453 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
9454 break;
9455 }
9456
9457 /* advance */
9458 pIemRec = pIemRec->pNext;
9459 pOtherRec = pOtherRec->pNext;
9460 }
9461
9462 /* Ignore extra writes and reads. */
9463 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
9464 {
9465 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
9466 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
9467 pIemRec = pIemRec->pNext;
9468 }
9469 if (pIemRec != NULL)
9470 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
9471 else if (pOtherRec != NULL)
9472 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
9473 }
9474 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
9475}
9476
9477#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
9478
9479/* stubs */
9480static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
9481{
9482 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
9483 return VERR_INTERNAL_ERROR;
9484}
9485
9486static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9487{
9488 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
9489 return VERR_INTERNAL_ERROR;
9490}
9491
9492#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
9493
9494
9495#ifdef LOG_ENABLED
9496/**
9497 * Logs the current instruction.
9498 * @param pVCpu The cross context virtual CPU structure of the caller.
9499 * @param pCtx The current CPU context.
9500 * @param fSameCtx Set if we have the same context information as the VMM,
9501 * clear if we may have already executed an instruction in
9502 * our debug context. When clear, we assume IEMCPU holds
9503 * valid CPU mode info.
9504 */
9505static void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
9506{
9507# ifdef IN_RING3
9508 if (LogIs2Enabled())
9509 {
9510 char szInstr[256];
9511 uint32_t cbInstr = 0;
9512 if (fSameCtx)
9513 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9514 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9515 szInstr, sizeof(szInstr), &cbInstr);
9516 else
9517 {
9518 uint32_t fFlags = 0;
9519 switch (pVCpu->iem.s.enmCpuMode)
9520 {
9521 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9522 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9523 case IEMMODE_16BIT:
9524 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
9525 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9526 else
9527 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9528 break;
9529 }
9530 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
9531 szInstr, sizeof(szInstr), &cbInstr);
9532 }
9533
9534 Log2(("****\n"
9535 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9536 " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
9537 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9538 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9539 " %s\n"
9540 ,
9541 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
9542 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
9543 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
9544 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
9545 pCtx->fpu.FSW, pCtx->fpu.FCW, pCtx->fpu.FTW, pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK,
9546 szInstr));
9547
9548 if (LogIs3Enabled())
9549 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
9550 }
9551 else
9552# endif
9553 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
9554 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
9555}
9556#endif
9557
9558
9559/**
9560 * Makes status code adjustments (pass up from I/O and access handlers)
9561 * and maintains statistics.
9562 *
9563 * @returns Strict VBox status code to pass up.
9564 * @param pIemCpu The IEM per CPU data.
9565 * @param rcStrict The status from executing an instruction.
9566 */
9567DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
9568{
9569 if (rcStrict != VINF_SUCCESS)
9570 {
9571 if (RT_SUCCESS(rcStrict))
9572 {
9573 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
9574 || rcStrict == VINF_IOM_R3_IOPORT_READ
9575 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
9576 || rcStrict == VINF_IOM_R3_MMIO_READ
9577 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
9578 || rcStrict == VINF_IOM_R3_MMIO_WRITE
9579 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9580 int32_t const rcPassUp = pIemCpu->rcPassUp;
9581 if (rcPassUp == VINF_SUCCESS)
9582 pIemCpu->cRetInfStatuses++;
9583 else if ( rcPassUp < VINF_EM_FIRST
9584 || rcPassUp > VINF_EM_LAST
9585 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
9586 {
9587 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
9588 pIemCpu->cRetPassUpStatus++;
9589 rcStrict = rcPassUp;
9590 }
9591 else
9592 {
9593 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
9594 pIemCpu->cRetInfStatuses++;
9595 }
9596 }
9597 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
9598 pIemCpu->cRetAspectNotImplemented++;
9599 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
9600 pIemCpu->cRetInstrNotImplemented++;
9601#ifdef IEM_VERIFICATION_MODE_FULL
9602 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
9603 rcStrict = VINF_SUCCESS;
9604#endif
9605 else
9606 pIemCpu->cRetErrStatuses++;
9607 }
9608 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
9609 {
9610 pIemCpu->cRetPassUpStatus++;
9611 rcStrict = pIemCpu->rcPassUp;
9612 }
9613
9614 return rcStrict;
9615}
9616
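/*
 * Illustrative sketch (not part of the build): the pass-up priority rule used
 * above for informational statuses, in isolation.  A pass-up status from an
 * I/O or access handler wins when it lies outside the VINF_EM range or is
 * stricter (numerically lower) than the instruction status; the function name
 * and the plain int types are made up for the example.
 */
#if 0
static int iemSketchPickStatus(int rcPassUp, int rcStrict)
{
    if (rcPassUp == VINF_SUCCESS)
        return rcStrict;                    /* nothing pending from a handler */
    if (rcStrict == VINF_SUCCESS)
        return rcPassUp;                    /* only the handler has something to report */
    if (   rcPassUp < VINF_EM_FIRST
        || rcPassUp > VINF_EM_LAST
        || rcPassUp < rcStrict)
        return rcPassUp;                    /* the pass-up status is the more important one */
    return rcStrict;                        /* keep the instruction status */
}
#endif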
9617
9618/**
9619 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9620 * IEMExecOneWithPrefetchedByPC.
9621 *
9622 * @return Strict VBox status code.
9623 * @param pVCpu The current virtual CPU.
9624 * @param pIemCpu The IEM per CPU data.
9625 * @param fExecuteInhibit If set, execute the instruction following CLI,
9626 * POP SS and MOV SS,GR.
9627 */
9628DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
9629{
9630 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9631 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9632 if (rcStrict == VINF_SUCCESS)
9633 pIemCpu->cInstructions++;
9634 if (pIemCpu->cActiveMappings > 0)
9635 iemMemRollback(pIemCpu);
9636//#ifdef DEBUG
9637// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
9638//#endif
9639
9640 /* Execute the next instruction as well if a cli, pop ss or
9641 mov ss, Gr has just completed successfully. */
9642 if ( fExecuteInhibit
9643 && rcStrict == VINF_SUCCESS
9644 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9645 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
9646 {
9647 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
9648 if (rcStrict == VINF_SUCCESS)
9649 {
9650# ifdef LOG_ENABLED
9651 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
9652# endif
9653 b; IEM_OPCODE_GET_NEXT_U8(&b);
9654 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9655 if (rcStrict == VINF_SUCCESS)
9656 pIemCpu->cInstructions++;
9657 if (pIemCpu->cActiveMappings > 0)
9658 iemMemRollback(pIemCpu);
9659 }
9660 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
9661 }
9662
9663 /*
9664 * Return value fiddling, statistics and sanity assertions.
9665 */
9666 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
9667
9668 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
9669 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
9670#if defined(IEM_VERIFICATION_MODE_FULL)
9671 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
9672 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
9673 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
9674 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
9675#endif
9676 return rcStrict;
9677}
9678
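/*
 * Illustrative sketch (not part of the build): the interrupt-shadow test used
 * above, in isolation.  The follow-up instruction is only executed when the
 * inhibit force-flag is set for the current RIP, i.e. when one of the
 * instructions listed above has just completed at the previous RIP.  The
 * helper name is made up for the example.
 */
#if 0
static bool iemSketchInInterruptShadow(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
        && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip;
}
#endif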
9679
9680#ifdef IN_RC
9681/**
9682 * Re-enters raw-mode or ensures we return to ring-3.
9683 *
9684 * @returns rcStrict, maybe modified.
9685 * @param pIemCpu The IEM CPU structure.
9686 * @param pVCpu The cross context virtual CPU structure of the caller.
9687 * @param pCtx The current CPU context.
9688 * @param   rcStrict    The status code returned by the interpreter.
9689 */
9690DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
9691{
9692 if (!pIemCpu->fInPatchCode)
9693 CPUMRawEnter(pVCpu, CPUMCTX2CORE(pCtx));
9694 return rcStrict;
9695}
9696#endif
9697
9698
9699/**
9700 * Execute one instruction.
9701 *
9702 * @return Strict VBox status code.
9703 * @param pVCpu The current virtual CPU.
9704 */
9705VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
9706{
9707 PIEMCPU pIemCpu = &pVCpu->iem.s;
9708
9709#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9710 iemExecVerificationModeSetup(pIemCpu);
9711#endif
9712#ifdef LOG_ENABLED
9713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9714 iemLogCurInstr(pVCpu, pCtx, true);
9715#endif
9716
9717 /*
9718 * Do the decoding and emulation.
9719 */
9720 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9721 if (rcStrict == VINF_SUCCESS)
9722 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9723
9724#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9725 /*
9726 * Assert some sanity.
9727 */
9728 iemExecVerificationModeCheck(pIemCpu);
9729#endif
9730#ifdef IN_RC
9731 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
9732#endif
9733 if (rcStrict != VINF_SUCCESS)
9734 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9735 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9736 return rcStrict;
9737}
9738
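/*
 * Illustrative sketch (not part of the build): a minimal caller loop around
 * IEMExecOne.  Real callers (EM and friends) do considerably more; this only
 * shows the intended strict status code handling.  The function name and the
 * instruction budget are made up for the example.
 */
#if 0
static VBOXSTRICTRC iemSketchExecSome(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstructions-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);
        if (rcStrict != VINF_SUCCESS)
            break;  /* informational statuses (VINF_IOM_R3_*, VINF_EM_*) go back to the caller */
    }
    return rcStrict;
}
#endif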
9739
9740VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9741{
9742 PIEMCPU pIemCpu = &pVCpu->iem.s;
9743 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9744 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9745
9746 uint32_t const cbOldWritten = pIemCpu->cbWritten;
9747 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9748 if (rcStrict == VINF_SUCCESS)
9749 {
9750 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9751 if (pcbWritten)
9752 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
9753 }
9754
9755#ifdef IN_RC
9756 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9757#endif
9758 return rcStrict;
9759}
9760
9761
9762VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9763 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9764{
9765 PIEMCPU pIemCpu = &pVCpu->iem.s;
9766 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9767 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9768
9769 VBOXSTRICTRC rcStrict;
9770 if ( cbOpcodeBytes
9771 && pCtx->rip == OpcodeBytesPC)
9772 {
9773 iemInitDecoder(pIemCpu, false);
9774 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
9775 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
9776 rcStrict = VINF_SUCCESS;
9777 }
9778 else
9779 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9780 if (rcStrict == VINF_SUCCESS)
9781 {
9782 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9783 }
9784
9785#ifdef IN_RC
9786 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9787#endif
9788 return rcStrict;
9789}
9790
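/*
 * Illustrative sketch (not part of the build): handing IEM opcode bytes the
 * caller already has.  When the given PC does not match the current RIP, or
 * no bytes are supplied, the call falls back to the normal opcode prefetch
 * path.  The wrapper name and parameters are made up for the example.
 */
#if 0
static VBOXSTRICTRC iemSketchEmulateWithKnownBytes(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t GCPtrInstr,
                                                   const uint8_t *pabInstr, size_t cbInstr)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, GCPtrInstr, pabInstr, cbInstr);
}
#endif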
9791
9792VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9793{
9794 PIEMCPU pIemCpu = &pVCpu->iem.s;
9795 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9796 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9797
9798 uint32_t const cbOldWritten = pIemCpu->cbWritten;
9799 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
9800 if (rcStrict == VINF_SUCCESS)
9801 {
9802 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
9803 if (pcbWritten)
9804 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
9805 }
9806
9807#ifdef IN_RC
9808 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9809#endif
9810 return rcStrict;
9811}
9812
9813
9814VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9815 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9816{
9817 PIEMCPU pIemCpu = &pVCpu->iem.s;
9818 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9819 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
9820
9821 VBOXSTRICTRC rcStrict;
9822 if ( cbOpcodeBytes
9823 && pCtx->rip == OpcodeBytesPC)
9824 {
9825 iemInitDecoder(pIemCpu, true);
9826 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
9827 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
9828 rcStrict = VINF_SUCCESS;
9829 }
9830 else
9831 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
9832 if (rcStrict == VINF_SUCCESS)
9833 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
9834
9835#ifdef IN_RC
9836 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
9837#endif
9838 return rcStrict;
9839}
9840
9841
9842VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
9843{
9844 PIEMCPU pIemCpu = &pVCpu->iem.s;
9845
9846 /*
9847 * See if there is an interrupt pending in TRPM and inject it if we can.
9848 */
9849#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
9850 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9851# ifdef IEM_VERIFICATION_MODE_FULL
9852 pIemCpu->uInjectCpl = UINT8_MAX;
9853# endif
9854 if ( pCtx->eflags.Bits.u1IF
9855 && TRPMHasTrap(pVCpu)
9856 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
9857 {
9858 uint8_t u8TrapNo;
9859 TRPMEVENT enmType;
9860 RTGCUINT uErrCode;
9861 RTGCPTR uCr2;
9862 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9863 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
9864 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9865 TRPMResetTrap(pVCpu);
9866 }
9867#else
9868 iemExecVerificationModeSetup(pIemCpu);
9869 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9870#endif
9871
9872 /*
9873 * Log the state.
9874 */
9875#ifdef LOG_ENABLED
9876 iemLogCurInstr(pVCpu, pCtx, true);
9877#endif
9878
9879 /*
9880 * Do the decoding and emulation.
9881 */
9882 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
9883 if (rcStrict == VINF_SUCCESS)
9884 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
9885
9886#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9887 /*
9888 * Assert some sanity.
9889 */
9890 iemExecVerificationModeCheck(pIemCpu);
9891#endif
9892
9893 /*
9894 * Maybe re-enter raw-mode and log.
9895 */
9896#ifdef IN_RC
9897 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
9898#endif
9899 if (rcStrict != VINF_SUCCESS)
9900         LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9901 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9902 return rcStrict;
9903}
9904
9905
9906
9907/**
9908 * Injects a trap, fault, abort, software interrupt or external interrupt.
9909 *
9910 * The parameter list matches TRPMQueryTrapAll pretty closely.
9911 *
9912 * @returns Strict VBox status code.
9913 * @param pVCpu The current virtual CPU.
9914 * @param u8TrapNo The trap number.
9915 * @param enmType What type is it (trap/fault/abort), software
9916 * interrupt or hardware interrupt.
9917 * @param uErrCode The error code if applicable.
9918 * @param uCr2 The CR2 value if applicable.
9919 */
9920VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
9921{
9922 iemInitDecoder(&pVCpu->iem.s, false);
9923#ifdef DBGFTRACE_ENABLED
9924 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9925 u8TrapNo, enmType, uErrCode, uCr2);
9926#endif
9927
9928 uint32_t fFlags;
9929 switch (enmType)
9930 {
9931 case TRPM_HARDWARE_INT:
9932 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9933 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9934 uErrCode = uCr2 = 0;
9935 break;
9936
9937 case TRPM_SOFTWARE_INT:
9938 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9939 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9940 uErrCode = uCr2 = 0;
9941 break;
9942
9943 case TRPM_TRAP:
9944 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9945 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9946 if (u8TrapNo == X86_XCPT_PF)
9947 fFlags |= IEM_XCPT_FLAGS_CR2;
9948 switch (u8TrapNo)
9949 {
9950 case X86_XCPT_DF:
9951 case X86_XCPT_TS:
9952 case X86_XCPT_NP:
9953 case X86_XCPT_SS:
9954 case X86_XCPT_PF:
9955 case X86_XCPT_AC:
9956 fFlags |= IEM_XCPT_FLAGS_ERR;
9957 break;
9958 }
9959 break;
9960
9961 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9962 }
9963
9964 return iemRaiseXcptOrInt(&pVCpu->iem.s, 0, u8TrapNo, fFlags, uErrCode, uCr2);
9965}
9966
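/*
 * Illustrative sketch (not part of the build): forwarding a pending TRPM event
 * to IEM, mirroring what IEMExecLots does above.  Verification-mode details
 * are ignored and the helper name is made up for the example.
 */
#if 0
static VBOXSTRICTRC iemSketchInjectPendingTrpmEvent(PVMCPU pVCpu)
{
    if (!TRPMHasTrap(pVCpu))
        return VINF_SUCCESS;
    uint8_t     u8TrapNo;
    TRPMEVENT   enmType;
    RTGCUINT    uErrCode;
    RTGCPTR     uCr2;
    int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */);
    AssertRCReturn(rc, rc);
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2);
    if (rcStrict == VINF_SUCCESS)
        TRPMResetTrap(pVCpu);
    return rcStrict;
}
#endif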
9967
9968VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9969{
9970 return VERR_NOT_IMPLEMENTED;
9971}
9972
9973
9974VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9975{
9976 return VERR_NOT_IMPLEMENTED;
9977}
9978
9979
9980#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
9981/**
9982 * Executes an IRET instruction with default operand size.
9983 *
9984 * This is for PATM.
9985 *
9986 * @returns VBox status code.
9987 * @param pVCpu The current virtual CPU.
9988 * @param pCtxCore The register frame.
9989 */
9990VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
9991{
9992 PIEMCPU pIemCpu = &pVCpu->iem.s;
9993 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
9994
9995 iemCtxCoreToCtx(pCtx, pCtxCore);
9996 iemInitDecoder(pIemCpu);
9997 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
9998 if (rcStrict == VINF_SUCCESS)
9999 iemCtxToCtxCore(pCtxCore, pCtx);
10000 else
10001 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10002 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10003 return rcStrict;
10004}
10005#endif
10006
10007
10008
10009/**
10010 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10011 *
10012 * This API ASSUMES that the caller has already verified that the guest code is
10013 * allowed to access the I/O port. (The I/O port is in the DX register in the
10014 * guest state.)
10015 *
10016 * @returns Strict VBox status code.
10017 * @param pVCpu The cross context per virtual CPU structure.
10018 * @param cbValue The size of the I/O port access (1, 2, or 4).
10019 * @param enmAddrMode The addressing mode.
10020 * @param fRepPrefix Indicates whether a repeat prefix is used
10021 * (doesn't matter which for this instruction).
10022 * @param cbInstr The instruction length in bytes.
10023 * @param iEffSeg The effective segment address.
10024 * @param   iEffSeg         The effective segment register number (X86_SREG_XXX).
10025VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10026 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
10027{
10028 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10029 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
10030
10031 /*
10032 * State init.
10033 */
10034 PIEMCPU pIemCpu = &pVCpu->iem.s;
10035 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
10036
10037 /*
10038 * Switch orgy for getting to the right handler.
10039 */
10040 VBOXSTRICTRC rcStrict;
10041 if (fRepPrefix)
10042 {
10043 switch (enmAddrMode)
10044 {
10045 case IEMMODE_16BIT:
10046 switch (cbValue)
10047 {
10048 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10049 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10050 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10051 default:
10052 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10053 }
10054 break;
10055
10056 case IEMMODE_32BIT:
10057 switch (cbValue)
10058 {
10059 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10060 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10061 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10062 default:
10063 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10064 }
10065 break;
10066
10067 case IEMMODE_64BIT:
10068 switch (cbValue)
10069 {
10070 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10071 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10072 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10073 default:
10074 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10075 }
10076 break;
10077
10078 default:
10079 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10080 }
10081 }
10082 else
10083 {
10084 switch (enmAddrMode)
10085 {
10086 case IEMMODE_16BIT:
10087 switch (cbValue)
10088 {
10089 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10090 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10091 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10092 default:
10093 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10094 }
10095 break;
10096
10097 case IEMMODE_32BIT:
10098 switch (cbValue)
10099 {
10100 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10101 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10102 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10103 default:
10104 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10105 }
10106 break;
10107
10108 case IEMMODE_64BIT:
10109 switch (cbValue)
10110 {
10111 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10112 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10113 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
10114 default:
10115 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10116 }
10117 break;
10118
10119 default:
10120 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10121 }
10122 }
10123
10124 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10125}
10126
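/*
 * Illustrative sketch (not part of the build): invoking the string OUT
 * interface for a 'rep outsb' (F3 6E, 2 bytes) with 32-bit addressing and the
 * default DS segment.  In practice HM derives these parameters from the
 * VM-exit information, and the I/O permission checks are assumed done; the
 * wrapper name is made up for the example.
 */
#if 0
static VBOXSTRICTRC iemSketchRepOutsbExit(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS);
}
#endif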
10127
10128/**
10129 * Interface for HM and EM for executing string I/O IN (read) instructions.
10130 *
10131 * This API ASSUMES that the caller has already verified that the guest code is
10132 * allowed to access the I/O port. (The I/O port is in the DX register in the
10133 * guest state.)
10134 *
10135 * @returns Strict VBox status code.
10136 * @param pVCpu The cross context per virtual CPU structure.
10137 * @param cbValue The size of the I/O port access (1, 2, or 4).
10138 * @param enmAddrMode The addressing mode.
10139 * @param fRepPrefix Indicates whether a repeat prefix is used
10140 * (doesn't matter which for this instruction).
10141 * @param cbInstr The instruction length in bytes.
10142 */
10143VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10144 bool fRepPrefix, uint8_t cbInstr)
10145{
10146 AssertReturn(cbInstr - 1U <= 14U, VERR_IEM_INVALID_INSTR_LENGTH);
10147
10148 /*
10149 * State init.
10150 */
10151 PIEMCPU pIemCpu = &pVCpu->iem.s;
10152 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
10153
10154 /*
10155 * Switch orgy for getting to the right handler.
10156 */
10157 VBOXSTRICTRC rcStrict;
10158 if (fRepPrefix)
10159 {
10160 switch (enmAddrMode)
10161 {
10162 case IEMMODE_16BIT:
10163 switch (cbValue)
10164 {
10165 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10166 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10167 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10168 default:
10169 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10170 }
10171 break;
10172
10173 case IEMMODE_32BIT:
10174 switch (cbValue)
10175 {
10176 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10177 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10178 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10179 default:
10180 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10181 }
10182 break;
10183
10184 case IEMMODE_64BIT:
10185 switch (cbValue)
10186 {
10187 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10188 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10189 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10190 default:
10191 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10192 }
10193 break;
10194
10195 default:
10196 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10197 }
10198 }
10199 else
10200 {
10201 switch (enmAddrMode)
10202 {
10203 case IEMMODE_16BIT:
10204 switch (cbValue)
10205 {
10206 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10207 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10208 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10209 default:
10210 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10211 }
10212 break;
10213
10214 case IEMMODE_32BIT:
10215 switch (cbValue)
10216 {
10217 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10218 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10219 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10220 default:
10221 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10222 }
10223 break;
10224
10225 case IEMMODE_64BIT:
10226 switch (cbValue)
10227 {
10228 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10229 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10230 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
10231 default:
10232 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10233 }
10234 break;
10235
10236 default:
10237 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10238 }
10239 }
10240
10241 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10242}
10243
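/*
 * Illustrative sketch (not part of the build): invoking the string IN
 * interface for a 'rep insw' (F3 6D, 2 bytes) with 16-bit addressing.  As with
 * the OUT variant, HM normally gets these parameters from the VM-exit
 * information; the wrapper name is made up for the example.
 */
#if 0
static VBOXSTRICTRC iemSketchRepInswExit(PVMCPU pVCpu)
{
    return IEMExecStringIoRead(pVCpu, 2 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/, 2 /*cbInstr*/);
}
#endif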