VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@65463

Last change on this file since 65463 was 65463, checked in by vboxsync, 8 years ago

iemInitDecoderAndPrefetchOpcodes: Some MSC compatible branch predicting.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 575.0 KB
Line 
1/* $Id: IEMAll.cpp 65463 2017-01-26 17:17:48Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews on things a bit
37 * too much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
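/*
 * Illustrative only (not part of the original sources): how the logging levels
 * listed above are typically exercised with the standard VBox Log* macros,
 * assuming a CPUMCTX pointer pCtx, a guest address GCPtrMem and a write size
 * cbMem are in scope.
 */
#if 0
LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));      /* Flow: enter/exit info.        */
Log4(("decode - %04x:%08RX64 %s\n", pCtx->cs.Sel, pCtx->rip, "xor eax,eax"));  /* Level 4: mnemonics w/ EIP.    */
Log8(("IEM WR %RGv LB %#zx\n", GCPtrMem, cbMem));                              /* Level 8: memory writes.       */
#endif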
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#ifdef IEM_VERIFICATION_MODE_FULL
115# include <VBox/vmm/rem.h>
116# include <VBox/vmm/mm.h>
117#endif
118#include <VBox/vmm/vm.h>
119#include <VBox/log.h>
120#include <VBox/err.h>
121#include <VBox/param.h>
122#include <VBox/dis.h>
123#include <VBox/disopcode.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
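/*
 * Illustrative sketch only (not from the original sources): a hypothetical
 * decoder defined and dispatched via the macros above, so the calling
 * convention and attributes stay in one place.
 */
#if 0
FNIEMOP_DEF(iemOp_ExampleOnly)
{
    /* ... decode the instruction and dispatch to a C implementation ... */
    return VINF_SUCCESS;
}

/* At a call site (with pVCpu in scope) the matching call macro is used: */
/*     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOp_ExampleOnly); */
#endif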
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
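/*
 * Illustrative only (not from the original sources): both views overlay the
 * same first 8 descriptor bytes, while 64-bit system descriptors additionally
 * use the high 8 bytes through the Long view.  The field names below are the
 * generic X86DESC accessors from iprt/x86.h.
 */
#if 0
IEMSELDESC Desc;
/* ... filled in by iemMemFetchSelDesc() ... */
bool const     fIsCodeSeg = Desc.Legacy.Gen.u1DescType
                         && (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE);
uint64_t const uBase64    = X86DESC64_BASE(&Desc.Long); /* 64-bit base of a system descriptor. */
#endif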
215
216/*********************************************************************************************************************************
217* Defined Constants And Macros *
218*********************************************************************************************************************************/
219/** @def IEM_WITH_SETJMP
220 * Enables alternative status code handling using setjmps.
221 *
222 * This adds a bit of expense via the setjmp() call since it saves all the
223 * non-volatile registers. However, it eliminates return code checks and allows
224 * for more optimal return value passing (return regs instead of stack buffer).
225 */
226#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
227# define IEM_WITH_SETJMP
228#endif
229
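/*
 * Illustrative only (not from the original sources): the two error-propagation
 * styles this switch selects between, assuming the *Jmp fetch variants that
 * are defined later in this file when IEM_WITH_SETJMP is active, and that
 * pVCpu and GCPtrMem are in scope.
 */
#if 0
/* Status-code style: every helper returns a VBOXSTRICTRC that must be checked. */
uint32_t     u32Value;
VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, X86_SREG_DS, GCPtrMem);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;

/* setjmp style: the helper longjmps on failure, so the caller just consumes
   the value and relies on an outer setjmp established higher up the stack. */
uint32_t u32Value2 = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
#endif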
230/** Temporary hack to disable the double execution. Will be removed in favor
231 * of a dedicated execution mode in EM. */
232//#define IEM_VERIFICATION_MODE_NO_REM
233
234/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
235 * due to GCC lacking knowledge about the value range of a switch. */
236#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
237
238/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
240
241/**
242 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
243 * occasion.
244 */
245#ifdef LOG_ENABLED
246# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
247 do { \
248 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
249 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
250 } while (0)
251#else
252# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
254#endif
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion using the supplied logger statement.
259 *
260 * @param a_LoggerArgs What to log on failure.
261 */
262#ifdef LOG_ENABLED
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 do { \
265 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
266 /*LogFunc(a_LoggerArgs);*/ \
267 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
268 } while (0)
269#else
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
272#endif
273
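/*
 * Illustrative only (not from the original sources): typical use of the macro
 * above inside an instruction implementation, with a made-up condition and
 * variable.
 */
#if 0
if (fSomeUnimplementedCornerCase)
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("uValue=%#x not handled yet\n", uValue));
#endif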
274/**
275 * Call an opcode decoder function.
276 *
277 * We're using macros for this so that adding and removing parameters can be
278 * done as we please. See FNIEMOP_DEF.
279 */
280#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
281
282/**
283 * Call a common opcode decoder function taking one extra argument.
284 *
285 * We're using macros for this so that adding and removing parameters can be
286 * done as we please. See FNIEMOP_DEF_1.
287 */
288#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
289
290/**
291 * Call a common opcode decoder function taking two extra arguments.
292 *
293 * We're using macros for this so that adding and removing parameters can be
294 * done as we please. See FNIEMOP_DEF_2.
295 */
296#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
297
298/**
299 * Check if we're currently executing in real or virtual 8086 mode.
300 *
301 * @returns @c true if it is, @c false if not.
302 * @param a_pVCpu The IEM state of the current CPU.
303 */
304#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
305
306/**
307 * Check if we're currently executing in virtual 8086 mode.
308 *
309 * @returns @c true if it is, @c false if not.
310 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
311 */
312#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
313
314/**
315 * Check if we're currently executing in long mode.
316 *
317 * @returns @c true if it is, @c false if not.
318 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
319 */
320#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
321
322/**
323 * Check if we're currently executing in real mode.
324 *
325 * @returns @c true if it is, @c false if not.
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
336
337/**
338 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
339 * @returns PCCPUMFEATURES
340 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
341 */
342#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
343
344/**
345 * Evaluates to true if we're presenting an Intel CPU to the guest.
346 */
347#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
348
349/**
350 * Evaluates to true if we're presenting an AMD CPU to the guest.
351 */
352#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
353
354/**
355 * Check if the address is canonical.
356 */
357#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
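/*
 * Example (general x86-64 fact, not from the original sources): an address is
 * canonical when bits 63:47 all equal bit 47, so 0x00007fffffffffff and
 * 0xffff800000000000 are canonical while 0x0000800000000000 is not.
 */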
358
359/** @def IEM_USE_UNALIGNED_DATA_ACCESS
360 * Use unaligned accesses instead of elaborate byte assembly. */
361#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
362# define IEM_USE_UNALIGNED_DATA_ACCESS
363#endif
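/*
 * Illustrative only (not from the original sources): what the define buys us
 * when reading a 32-bit value from a byte buffer pbBuf at offset off.
 */
#if 0
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
uint32_t const u32 = *(uint32_t const *)&pbBuf[off];  /* x86/AMD64 handle unaligned reads fine. */
# else
uint32_t const u32 = RT_MAKE_U32_FROM_U8(pbBuf[off], pbBuf[off + 1], pbBuf[off + 2], pbBuf[off + 3]);
# endif
#endif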
364
365
366/*********************************************************************************************************************************
367* Global Variables *
368*********************************************************************************************************************************/
369extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
370
371
372/** Function table for the ADD instruction. */
373IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
374{
375 iemAImpl_add_u8, iemAImpl_add_u8_locked,
376 iemAImpl_add_u16, iemAImpl_add_u16_locked,
377 iemAImpl_add_u32, iemAImpl_add_u32_locked,
378 iemAImpl_add_u64, iemAImpl_add_u64_locked
379};
380
381/** Function table for the ADC instruction. */
382IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
383{
384 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
385 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
386 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
387 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
388};
389
390/** Function table for the SUB instruction. */
391IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
392{
393 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
394 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
395 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
396 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
397};
398
399/** Function table for the SBB instruction. */
400IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
401{
402 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
403 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
404 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
405 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
406};
407
408/** Function table for the OR instruction. */
409IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
410{
411 iemAImpl_or_u8, iemAImpl_or_u8_locked,
412 iemAImpl_or_u16, iemAImpl_or_u16_locked,
413 iemAImpl_or_u32, iemAImpl_or_u32_locked,
414 iemAImpl_or_u64, iemAImpl_or_u64_locked
415};
416
417/** Function table for the XOR instruction. */
418IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
419{
420 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
421 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
422 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
423 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
424};
425
426/** Function table for the AND instruction. */
427IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
428{
429 iemAImpl_and_u8, iemAImpl_and_u8_locked,
430 iemAImpl_and_u16, iemAImpl_and_u16_locked,
431 iemAImpl_and_u32, iemAImpl_and_u32_locked,
432 iemAImpl_and_u64, iemAImpl_and_u64_locked
433};
434
435/** Function table for the CMP instruction.
436 * @remarks Making operand order ASSUMPTIONS.
437 */
438IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
439{
440 iemAImpl_cmp_u8, NULL,
441 iemAImpl_cmp_u16, NULL,
442 iemAImpl_cmp_u32, NULL,
443 iemAImpl_cmp_u64, NULL
444};
445
446/** Function table for the TEST instruction.
447 * @remarks Making operand order ASSUMPTIONS.
448 */
449IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
450{
451 iemAImpl_test_u8, NULL,
452 iemAImpl_test_u16, NULL,
453 iemAImpl_test_u32, NULL,
454 iemAImpl_test_u64, NULL
455};
456
457/** Function table for the BT instruction. */
458IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
459{
460 NULL, NULL,
461 iemAImpl_bt_u16, NULL,
462 iemAImpl_bt_u32, NULL,
463 iemAImpl_bt_u64, NULL
464};
465
466/** Function table for the BTC instruction. */
467IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
468{
469 NULL, NULL,
470 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
471 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
472 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
473};
474
475/** Function table for the BTR instruction. */
476IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
477{
478 NULL, NULL,
479 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
480 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
481 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
482};
483
484/** Function table for the BTS instruction. */
485IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
486{
487 NULL, NULL,
488 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
489 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
490 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
491};
492
493/** Function table for the BSF instruction. */
494IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
495{
496 NULL, NULL,
497 iemAImpl_bsf_u16, NULL,
498 iemAImpl_bsf_u32, NULL,
499 iemAImpl_bsf_u64, NULL
500};
501
502/** Function table for the BSR instruction. */
503IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
504{
505 NULL, NULL,
506 iemAImpl_bsr_u16, NULL,
507 iemAImpl_bsr_u32, NULL,
508 iemAImpl_bsr_u64, NULL
509};
510
511/** Function table for the IMUL instruction. */
512IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
513{
514 NULL, NULL,
515 iemAImpl_imul_two_u16, NULL,
516 iemAImpl_imul_two_u32, NULL,
517 iemAImpl_imul_two_u64, NULL
518};
519
520/** Group 1 /r lookup table. */
521IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
522{
523 &g_iemAImpl_add,
524 &g_iemAImpl_or,
525 &g_iemAImpl_adc,
526 &g_iemAImpl_sbb,
527 &g_iemAImpl_and,
528 &g_iemAImpl_sub,
529 &g_iemAImpl_xor,
530 &g_iemAImpl_cmp
531};
532
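/*
 * Illustrative only (not from the original sources): a group-1 decoder picks
 * its worker table using the reg field (bits 5:3) of the ModR/M byte bRm,
 * which is exactly the index order of the array above (0=ADD ... 7=CMP).
 */
#if 0
uint8_t const         iReg  = (bRm >> 3) & 7;
PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[iReg];
#endif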
533/** Function table for the INC instruction. */
534IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
535{
536 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
537 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
538 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
539 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
540};
541
542/** Function table for the DEC instruction. */
543IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
544{
545 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
546 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
547 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
548 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
549};
550
551/** Function table for the NEG instruction. */
552IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
553{
554 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
555 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
556 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
557 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
558};
559
560/** Function table for the NOT instruction. */
561IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
562{
563 iemAImpl_not_u8, iemAImpl_not_u8_locked,
564 iemAImpl_not_u16, iemAImpl_not_u16_locked,
565 iemAImpl_not_u32, iemAImpl_not_u32_locked,
566 iemAImpl_not_u64, iemAImpl_not_u64_locked
567};
568
569
570/** Function table for the ROL instruction. */
571IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
572{
573 iemAImpl_rol_u8,
574 iemAImpl_rol_u16,
575 iemAImpl_rol_u32,
576 iemAImpl_rol_u64
577};
578
579/** Function table for the ROR instruction. */
580IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
581{
582 iemAImpl_ror_u8,
583 iemAImpl_ror_u16,
584 iemAImpl_ror_u32,
585 iemAImpl_ror_u64
586};
587
588/** Function table for the RCL instruction. */
589IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
590{
591 iemAImpl_rcl_u8,
592 iemAImpl_rcl_u16,
593 iemAImpl_rcl_u32,
594 iemAImpl_rcl_u64
595};
596
597/** Function table for the RCR instruction. */
598IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
599{
600 iemAImpl_rcr_u8,
601 iemAImpl_rcr_u16,
602 iemAImpl_rcr_u32,
603 iemAImpl_rcr_u64
604};
605
606/** Function table for the SHL instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
608{
609 iemAImpl_shl_u8,
610 iemAImpl_shl_u16,
611 iemAImpl_shl_u32,
612 iemAImpl_shl_u64
613};
614
615/** Function table for the SHR instruction. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
617{
618 iemAImpl_shr_u8,
619 iemAImpl_shr_u16,
620 iemAImpl_shr_u32,
621 iemAImpl_shr_u64
622};
623
624/** Function table for the SAR instruction. */
625IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
626{
627 iemAImpl_sar_u8,
628 iemAImpl_sar_u16,
629 iemAImpl_sar_u32,
630 iemAImpl_sar_u64
631};
632
633
634/** Function table for the MUL instruction. */
635IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
636{
637 iemAImpl_mul_u8,
638 iemAImpl_mul_u16,
639 iemAImpl_mul_u32,
640 iemAImpl_mul_u64
641};
642
643/** Function table for the IMUL instruction working implicitly on rAX. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
645{
646 iemAImpl_imul_u8,
647 iemAImpl_imul_u16,
648 iemAImpl_imul_u32,
649 iemAImpl_imul_u64
650};
651
652/** Function table for the DIV instruction. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
654{
655 iemAImpl_div_u8,
656 iemAImpl_div_u16,
657 iemAImpl_div_u32,
658 iemAImpl_div_u64
659};
660
661/** Function table for the IDIV instruction. */
662IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
663{
664 iemAImpl_idiv_u8,
665 iemAImpl_idiv_u16,
666 iemAImpl_idiv_u32,
667 iemAImpl_idiv_u64
668};
669
670/** Function table for the SHLD instruction */
671IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
672{
673 iemAImpl_shld_u16,
674 iemAImpl_shld_u32,
675 iemAImpl_shld_u64,
676};
677
678/** Function table for the SHRD instruction */
679IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
680{
681 iemAImpl_shrd_u16,
682 iemAImpl_shrd_u32,
683 iemAImpl_shrd_u64,
684};
685
686
687/** Function table for the PUNPCKLBW instruction */
688IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
689/** Function table for the PUNPCKLWD instruction */
690IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
691/** Function table for the PUNPCKLDQ instruction */
692IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
693/** Function table for the PUNPCKLQDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
695
696/** Function table for the PUNPCKHBW instruction */
697IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
698/** Function table for the PUNPCKHWD instruction */
699IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
700/** Function table for the PUNPCKHDQ instruction */
701IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
702/** Function table for the PUNPCKHQDQ instruction */
703IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
704
705/** Function table for the PXOR instruction */
706IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
707/** Function table for the PCMPEQB instruction */
708IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
709/** Function table for the PCMPEQW instruction */
710IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
711/** Function table for the PCMPEQD instruction */
712IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
713
714
715#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
716/** What IEM just wrote. */
717uint8_t g_abIemWrote[256];
718/** How much IEM just wrote. */
719size_t g_cbIemWrote;
720#endif
721
722
723/*********************************************************************************************************************************
724* Internal Functions *
725*********************************************************************************************************************************/
726IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
727IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
728IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
729IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
730/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
733IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
734IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
735IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
736IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
737IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
738IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
740IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
742IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
743#ifdef IEM_WITH_SETJMP
744DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
745DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
746DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
748DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
749#endif
750
751IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
752IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
754IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
755IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
756IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
757IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
758IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
759IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
760IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
761IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
762IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
763IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
764IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
765IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
766IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
767
768#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
769IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
770#endif
771IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
772IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
773
774
775
776/**
777 * Sets the pass up status.
778 *
779 * @returns VINF_SUCCESS.
780 * @param pVCpu The cross context virtual CPU structure of the
781 * calling thread.
782 * @param rcPassUp The pass up status. Must be informational.
783 * VINF_SUCCESS is not allowed.
784 */
785IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
786{
787 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
788
789 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
790 if (rcOldPassUp == VINF_SUCCESS)
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 /* If both are EM scheduling codes, use EM priority rules. */
793 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
794 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
795 {
796 if (rcPassUp < rcOldPassUp)
797 {
798 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
799 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
800 }
801 else
802 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
803 }
804 /* Override EM scheduling with specific status code. */
805 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
806 {
807 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
808 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
809 }
810 /* Don't override specific status code, first come first served. */
811 else
812 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
813 return VINF_SUCCESS;
814}
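/*
 * Illustrative only (not from the original sources): the typical call-site
 * pattern for the helper above, mirroring how it is used further down in this
 * file for physical reads that return informational status codes.
 */
#if 0
VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pvBuf, cbBuf, PGMACCESSORIGIN_IEM);
if (rcStrict == VINF_SUCCESS)
{ /* likely */ }
else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); /* Latch the info status, continue as VINF_SUCCESS. */
else
    return rcStrict;                                /* Genuine error. */
#endif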
815
816
817/**
818 * Calculates the CPU mode.
819 *
820 * This is mainly for updating IEMCPU::enmCpuMode.
821 *
822 * @returns CPU mode.
823 * @param pCtx The register context for the CPU.
824 */
825DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
826{
827 if (CPUMIsGuestIn64BitCodeEx(pCtx))
828 return IEMMODE_64BIT;
829 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
830 return IEMMODE_32BIT;
831 return IEMMODE_16BIT;
832}
833
834
835/**
836 * Initializes the execution state.
837 *
838 * @param pVCpu The cross context virtual CPU structure of the
839 * calling thread.
840 * @param fBypassHandlers Whether to bypass access handlers.
841 *
842 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
843 * side-effects in strict builds.
844 */
845DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
846{
847 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
848
849 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
850
851#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
854 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
855 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
856 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
858 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
859 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
860#endif
861
862#ifdef VBOX_WITH_RAW_MODE_NOT_R0
863 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
864#endif
865 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
866 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
867#ifdef VBOX_STRICT
868 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
869 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
870 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
871 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
872 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
873 pVCpu->iem.s.uRexReg = 127;
874 pVCpu->iem.s.uRexB = 127;
875 pVCpu->iem.s.uRexIndex = 127;
876 pVCpu->iem.s.iEffSeg = 127;
877 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
878# ifdef IEM_WITH_CODE_TLB
879 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
880 pVCpu->iem.s.pbInstrBuf = NULL;
881 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
882 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
883 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
884 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
885# else
886 pVCpu->iem.s.offOpcode = 127;
887 pVCpu->iem.s.cbOpcode = 127;
888# endif
889#endif
890
891 pVCpu->iem.s.cActiveMappings = 0;
892 pVCpu->iem.s.iNextMapping = 0;
893 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
894 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
895#ifdef VBOX_WITH_RAW_MODE_NOT_R0
896 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
897 && pCtx->cs.u64Base == 0
898 && pCtx->cs.u32Limit == UINT32_MAX
899 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
900 if (!pVCpu->iem.s.fInPatchCode)
901 CPUMRawLeave(pVCpu, VINF_SUCCESS);
902#endif
903
904#ifdef IEM_VERIFICATION_MODE_FULL
905 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
906 pVCpu->iem.s.fNoRem = true;
907#endif
908}
909
910
911/**
912 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
913 *
914 * @param pVCpu The cross context virtual CPU structure of the
915 * calling thread.
916 */
917DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
918{
919 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
920#ifdef IEM_VERIFICATION_MODE_FULL
921 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
922#endif
923#ifdef VBOX_STRICT
924# ifdef IEM_WITH_CODE_TLB
925 NOREF(pVCpu);
926# else
927 pVCpu->iem.s.cbOpcode = 0;
928# endif
929#else
930 NOREF(pVCpu);
931#endif
932}
933
934
935/**
936 * Initializes the decoder state.
937 *
938 * iemReInitDecoder is mostly a copy of this function.
939 *
940 * @param pVCpu The cross context virtual CPU structure of the
941 * calling thread.
942 * @param fBypassHandlers Whether to bypass access handlers.
943 */
944DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
945{
946 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
947
948 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
949
950#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
951 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
952 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
953 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
954 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
955 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
956 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
957 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
959#endif
960
961#ifdef VBOX_WITH_RAW_MODE_NOT_R0
962 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
963#endif
964 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
965#ifdef IEM_VERIFICATION_MODE_FULL
966 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
967 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
968#endif
969 IEMMODE enmMode = iemCalcCpuMode(pCtx);
970 pVCpu->iem.s.enmCpuMode = enmMode;
971 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
972 pVCpu->iem.s.enmEffAddrMode = enmMode;
973 if (enmMode != IEMMODE_64BIT)
974 {
975 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
976 pVCpu->iem.s.enmEffOpSize = enmMode;
977 }
978 else
979 {
980 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
981 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
982 }
983 pVCpu->iem.s.fPrefixes = 0;
984 pVCpu->iem.s.uRexReg = 0;
985 pVCpu->iem.s.uRexB = 0;
986 pVCpu->iem.s.uRexIndex = 0;
987 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
988#ifdef IEM_WITH_CODE_TLB
989 pVCpu->iem.s.pbInstrBuf = NULL;
990 pVCpu->iem.s.offInstrNextByte = 0;
991 pVCpu->iem.s.offCurInstrStart = 0;
992# ifdef VBOX_STRICT
993 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
994 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
995 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
996# endif
997#else
998 pVCpu->iem.s.offOpcode = 0;
999 pVCpu->iem.s.cbOpcode = 0;
1000#endif
1001 pVCpu->iem.s.cActiveMappings = 0;
1002 pVCpu->iem.s.iNextMapping = 0;
1003 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1004 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1005#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1006 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1007 && pCtx->cs.u64Base == 0
1008 && pCtx->cs.u32Limit == UINT32_MAX
1009 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1010 if (!pVCpu->iem.s.fInPatchCode)
1011 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1012#endif
1013
1014#ifdef DBGFTRACE_ENABLED
1015 switch (enmMode)
1016 {
1017 case IEMMODE_64BIT:
1018 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1019 break;
1020 case IEMMODE_32BIT:
1021 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1022 break;
1023 case IEMMODE_16BIT:
1024 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1025 break;
1026 }
1027#endif
1028}
1029
1030
1031/**
1032 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1033 *
1034 * This is mostly a copy of iemInitDecoder.
1035 *
1036 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1037 */
1038DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1039{
1040 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1041
1042 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1043
1044#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1046 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1049 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1050 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1051 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1052 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1053#endif
1054
1055 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1056#ifdef IEM_VERIFICATION_MODE_FULL
1057 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1058 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1059#endif
1060 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1061 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1062 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1063 pVCpu->iem.s.enmEffAddrMode = enmMode;
1064 if (enmMode != IEMMODE_64BIT)
1065 {
1066 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1067 pVCpu->iem.s.enmEffOpSize = enmMode;
1068 }
1069 else
1070 {
1071 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1072 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1073 }
1074 pVCpu->iem.s.fPrefixes = 0;
1075 pVCpu->iem.s.uRexReg = 0;
1076 pVCpu->iem.s.uRexB = 0;
1077 pVCpu->iem.s.uRexIndex = 0;
1078 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1079#ifdef IEM_WITH_CODE_TLB
1080 if (pVCpu->iem.s.pbInstrBuf)
1081 {
1082 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1083 - pVCpu->iem.s.uInstrBufPc;
1084 if (off < pVCpu->iem.s.cbInstrBufTotal)
1085 {
1086 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1087 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1088 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1089 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1090 else
1091 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1092 }
1093 else
1094 {
1095 pVCpu->iem.s.pbInstrBuf = NULL;
1096 pVCpu->iem.s.offInstrNextByte = 0;
1097 pVCpu->iem.s.offCurInstrStart = 0;
1098 pVCpu->iem.s.cbInstrBuf = 0;
1099 pVCpu->iem.s.cbInstrBufTotal = 0;
1100 }
1101 }
1102 else
1103 {
1104 pVCpu->iem.s.offInstrNextByte = 0;
1105 pVCpu->iem.s.offCurInstrStart = 0;
1106 pVCpu->iem.s.cbInstrBuf = 0;
1107 pVCpu->iem.s.cbInstrBufTotal = 0;
1108 }
1109#else
1110 pVCpu->iem.s.cbOpcode = 0;
1111 pVCpu->iem.s.offOpcode = 0;
1112#endif
1113 Assert(pVCpu->iem.s.cActiveMappings == 0);
1114 pVCpu->iem.s.iNextMapping = 0;
1115 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1116 Assert(pVCpu->iem.s.fBypassHandlers == false);
1117#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1118 if (!pVCpu->iem.s.fInPatchCode)
1119 { /* likely */ }
1120 else
1121 {
1122 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1123 && pCtx->cs.u64Base == 0
1124 && pCtx->cs.u32Limit == UINT32_MAX
1125 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1126 if (!pVCpu->iem.s.fInPatchCode)
1127 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1128 }
1129#endif
1130
1131#ifdef DBGFTRACE_ENABLED
1132 switch (enmMode)
1133 {
1134 case IEMMODE_64BIT:
1135 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1136 break;
1137 case IEMMODE_32BIT:
1138 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1139 break;
1140 case IEMMODE_16BIT:
1141 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1142 break;
1143 }
1144#endif
1145}
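/*
 * Worked example for the instruction-buffer reuse above (numbers made up):
 * with uInstrBufPc=0x1000, cbInstrBufTotal=0x1000 and a new RIP of 0x1234,
 * off=0x234 lies inside the buffer, so the window is kept and cbInstrBuf is
 * clipped to off+15=0x243, meaning at most one maximum-length instruction
 * beyond the new instruction start is considered directly fetchable before
 * the slow fetch path re-validates.
 */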
1146
1147
1148
1149/**
1150 * Prefetch opcodes the first time execution is started.
1151 *
1152 * @returns Strict VBox status code.
1153 * @param pVCpu The cross context virtual CPU structure of the
1154 * calling thread.
1155 * @param fBypassHandlers Whether to bypass access handlers.
1156 */
1157IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1158{
1159#ifdef IEM_VERIFICATION_MODE_FULL
1160 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1161#endif
1162 iemInitDecoder(pVCpu, fBypassHandlers);
1163
1164#ifdef IEM_WITH_CODE_TLB
1165 /** @todo Do ITLB lookup here. */
1166
1167#else /* !IEM_WITH_CODE_TLB */
1168
1169 /*
1170 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1171 *
1172 * First translate CS:rIP to a physical address.
1173 */
1174 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1175 uint32_t cbToTryRead;
1176 RTGCPTR GCPtrPC;
1177 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1178 {
1179 cbToTryRead = PAGE_SIZE;
1180 GCPtrPC = pCtx->rip;
1181 if (IEM_IS_CANONICAL(GCPtrPC))
1182 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1183 else
1184 return iemRaiseGeneralProtectionFault0(pVCpu);
1185 }
1186 else
1187 {
1188 uint32_t GCPtrPC32 = pCtx->eip;
1189 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1190 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1191 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1192 else
1193 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1194 if (cbToTryRead) { /* likely */ }
1195 else /* overflowed */
1196 {
1197 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1198 cbToTryRead = UINT32_MAX;
1199 }
1200 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1201 Assert(GCPtrPC <= UINT32_MAX);
1202 }
1203
1204# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1205 /* Allow interpretation of patch manager code blocks since they can for
1206 instance throw #PFs for perfectly good reasons. */
1207 if (pVCpu->iem.s.fInPatchCode)
1208 {
1209 size_t cbRead = 0;
1210 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1211 AssertRCReturn(rc, rc);
1212 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1213 return VINF_SUCCESS;
1214 }
1215# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1216
1217 RTGCPHYS GCPhys;
1218 uint64_t fFlags;
1219 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1220 if (RT_SUCCESS(rc)) { /* probable */ }
1221 else
1222 {
1223 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1224 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1225 }
1226 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1227 else
1228 {
1229 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1230 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1231 }
1232 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1233 else
1234 {
1235 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1236 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1237 }
1238 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1239 /** @todo Check reserved bits and such stuff. PGM is better at doing
1240 * that, so do it when implementing the guest virtual address
1241 * TLB... */
1242
1243# ifdef IEM_VERIFICATION_MODE_FULL
1244 /*
1245 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1246 * instruction.
1247 */
1248 /** @todo optimize this differently by not using PGMPhysRead. */
1249 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1250 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1251 if ( offPrevOpcodes < cbOldOpcodes
1252 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1253 {
1254 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1255 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1256 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1257 pVCpu->iem.s.cbOpcode = cbNew;
1258 return VINF_SUCCESS;
1259 }
1260# endif
1261
1262 /*
1263 * Read the bytes at this address.
1264 */
1265 PVM pVM = pVCpu->CTX_SUFF(pVM);
1266# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1267 size_t cbActual;
1268 if ( PATMIsEnabled(pVM)
1269 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1270 {
1271 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1272 Assert(cbActual > 0);
1273 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1274 }
1275 else
1276# endif
1277 {
1278 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1279 if (cbToTryRead > cbLeftOnPage)
1280 cbToTryRead = cbLeftOnPage;
1281 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1282 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1283
1284 if (!pVCpu->iem.s.fBypassHandlers)
1285 {
1286 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1287 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1288 { /* likely */ }
1289 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1290 {
1291 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1292 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1293 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1294 }
1295 else
1296 {
1297 Log((RT_SUCCESS(rcStrict)
1298 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1299 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1300 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1301 return rcStrict;
1302 }
1303 }
1304 else
1305 {
1306 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1307 if (RT_SUCCESS(rc))
1308 { /* likely */ }
1309 else
1310 {
1311 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1312 GCPtrPC, GCPhys, cbToTryRead, rc));
1313 return rc;
1314 }
1315 }
1316 pVCpu->iem.s.cbOpcode = cbToTryRead;
1317 }
1318#endif /* !IEM_WITH_CODE_TLB */
1319 return VINF_SUCCESS;
1320}
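/*
 * Worked example for the non-64-bit prefetch path above (numbers made up):
 * with cs.u64Base=0x10000, cs.u32Limit=0xffff and eip=0x1234, the limit check
 * passes, cbToTryRead = 0xffff - 0x1234 + 1 = 0xedcc and the linear address
 * becomes GCPtrPC = 0x11234.  The read size is then clipped to the bytes left
 * on the page (0x1000 - 0x234 = 0xdcc) and finally to sizeof(abOpcode).
 */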
1321
1322
1323/**
1324 * Invalidates the IEM TLBs.
1325 *
1326 * This is called internally as well as by PGM when moving GC mappings.
1327 *
1328 *
1329 * @param pVCpu The cross context virtual CPU structure of the calling
1330 * thread.
1331 * @param fVmm Set when PGM calls us with a remapping.
1332 */
1333VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1334{
1335#ifdef IEM_WITH_CODE_TLB
1336 pVCpu->iem.s.cbInstrBufTotal = 0;
1337 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1338 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1339 { /* very likely */ }
1340 else
1341 {
1342 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1343 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1344 while (i-- > 0)
1345 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1346 }
1347#endif
1348
1349#ifdef IEM_WITH_DATA_TLB
1350 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1351 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1352 { /* very likely */ }
1353 else
1354 {
1355 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1356 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1357 while (i-- > 0)
1358 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1359 }
1360#endif
1361 NOREF(pVCpu); NOREF(fVmm);
1362}
1363
1364
1365/**
1366 * Invalidates a page in the TLBs.
1367 *
1368 * @param pVCpu The cross context virtual CPU structure of the calling
1369 * thread.
1370 * @param GCPtr The address of the page to invalidate
1371 */
1372VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1373{
1374#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1375 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1376 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1377 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1378 uintptr_t idx = (uint8_t)GCPtr;
1379
1380# ifdef IEM_WITH_CODE_TLB
1381 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1382 {
1383 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1384 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1385 pVCpu->iem.s.cbInstrBufTotal = 0;
1386 }
1387# endif
1388
1389# ifdef IEM_WITH_DATA_TLB
1390 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1391 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1392# endif
1393#else
1394 NOREF(pVCpu); NOREF(GCPtr);
1395#endif
1396}
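/*
 * Worked example for the TLB tag math above (made-up address): for
 * GCPtr=0x7fff12345000 the shifted value is 0x7fff12345, so idx=0x45, and the
 * entry only matches while its uTag still carries the current uTlbRevision.
 * This is why IEMTlbInvalidateAll can flush everything just by bumping the
 * revision instead of clearing all 256 entries.
 */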
1397
1398
1399/**
1400 * Invalidates the host physical aspects of the IEM TLBs.
1401 *
1402 * This is called internally as well as by PGM when moving GC mappings.
1403 *
1404 * @param pVCpu The cross context virtual CPU structure of the calling
1405 * thread.
1406 */
1407VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1408{
1409#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1410 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1411
1412# ifdef IEM_WITH_CODE_TLB
1413 pVCpu->iem.s.cbInstrBufTotal = 0;
1414# endif
1415 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1416 if (uTlbPhysRev != 0)
1417 {
1418 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1419 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1420 }
1421 else
1422 {
1423 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1424 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1425
1426 unsigned i;
1427# ifdef IEM_WITH_CODE_TLB
1428 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1429 while (i-- > 0)
1430 {
1431 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1432 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1433 }
1434# endif
1435# ifdef IEM_WITH_DATA_TLB
1436 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1437 while (i-- > 0)
1438 {
1439 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1440 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1441 }
1442# endif
1443 }
1444#else
1445 NOREF(pVCpu);
1446#endif
1447}
1448
1449
1450/**
1451 * Invalidates the host physical aspects of the IEM TLBs.
1452 *
1453 * This is called internally as well as by PGM when moving GC mappings.
1454 *
1455 * @param pVM The cross context VM structure.
1456 *
1457 * @remarks Caller holds the PGM lock.
1458 */
1459VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1460{
1461 RT_NOREF_PV(pVM);
1462}
1463
1464#ifdef IEM_WITH_CODE_TLB
1465
1466/**
1467 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1468 * and jumping on failure.
1469 *
1470 * We end up here for a number of reasons:
1471 * - pbInstrBuf isn't yet initialized.
1472 * - Advancing beyond the buffer boundary (e.g. cross page).
1473 * - Advancing beyond the CS segment limit.
1474 * - Fetching from a non-mappable page (e.g. MMIO).
1475 *
1476 * @param pVCpu The cross context virtual CPU structure of the
1477 * calling thread.
1478 * @param pvDst Where to return the bytes.
1479 * @param cbDst Number of bytes to read.
1480 *
1481 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1482 */
1483IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1484{
1485#ifdef IN_RING3
1486//__debugbreak();
1487 for (;;)
1488 {
1489 Assert(cbDst <= 8);
1490 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1491
1492 /*
1493 * We might have a partial buffer match, deal with that first to make the
1494 * rest simpler. This is the first part of the cross page/buffer case.
1495 */
1496 if (pVCpu->iem.s.pbInstrBuf != NULL)
1497 {
1498 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1499 {
1500 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1501 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1502 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1503
1504 cbDst -= cbCopy;
1505 pvDst = (uint8_t *)pvDst + cbCopy;
1506 offBuf += cbCopy;
1507 pVCpu->iem.s.offInstrNextByte += offBuf;
1508 }
1509 }
1510
1511 /*
1512 * Check segment limit, figuring how much we're allowed to access at this point.
1513 *
1514 * We will fault immediately if RIP is past the segment limit / in non-canonical
1515 * territory. If we do continue, there are one or more bytes to read before we
1516 * end up in trouble and we need to do that first before faulting.
1517 */
1518 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1519 RTGCPTR GCPtrFirst;
1520 uint32_t cbMaxRead;
1521 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1522 {
1523 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1524 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1525 { /* likely */ }
1526 else
1527 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1528 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1529 }
1530 else
1531 {
1532 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1533 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1534 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1535 { /* likely */ }
1536 else
1537 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1538 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1539 if (cbMaxRead != 0)
1540 { /* likely */ }
1541 else
1542 {
1543 /* Overflowed because address is 0 and limit is max. */
1544 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1545 cbMaxRead = X86_PAGE_SIZE;
1546 }
1547 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1548 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1549 if (cbMaxRead2 < cbMaxRead)
1550 cbMaxRead = cbMaxRead2;
1551 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1552 }
1553
1554 /*
1555 * Get the TLB entry for this piece of code.
1556 */
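        /*
         * Note: the tag is the linear page number with the TLB revision mixed
         * into the high bits, so bumping the revision invalidates every cached
         * tag at once.  The table is direct mapped with 256 entries; the low
         * 8 bits of the tag (i.e. of the page number) select the slot.
         */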
1557 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1558 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1559 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1560 if (pTlbe->uTag == uTag)
1561 {
1562 /* likely when executing lots of code, otherwise unlikely */
1563# ifdef VBOX_WITH_STATISTICS
1564 pVCpu->iem.s.CodeTlb.cTlbHits++;
1565# endif
1566 }
1567 else
1568 {
1569 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1570# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1571 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1572 {
1573 pTlbe->uTag = uTag;
1574            pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1575                                    | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1576 pTlbe->GCPhys = NIL_RTGCPHYS;
1577 pTlbe->pbMappingR3 = NULL;
1578 }
1579 else
1580# endif
1581 {
1582 RTGCPHYS GCPhys;
1583 uint64_t fFlags;
1584 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1585 if (RT_FAILURE(rc))
1586 {
1587 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1588 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1589 }
1590
1591 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1592 pTlbe->uTag = uTag;
1593 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1594 pTlbe->GCPhys = GCPhys;
1595 pTlbe->pbMappingR3 = NULL;
1596 }
1597 }
1598
1599 /*
1600 * Check TLB page table level access flags.
1601 */
1602 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1603 {
1604 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1605 {
1606 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1607 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1608 }
1609 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1610 {
1611 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1612 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1613 }
1614 }
1615
1616# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1617 /*
1618 * Allow interpretation of patch manager code blocks since they can for
1619 * instance throw #PFs for perfectly good reasons.
1620 */
1621 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1622        { /* likely */ }
1623 else
1624 {
1625            /** @todo Could optimize this a little in ring-3 if we liked. */
1626 size_t cbRead = 0;
1627 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1628 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1629 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1630 return;
1631 }
1632# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1633
1634 /*
1635 * Look up the physical page info if necessary.
1636 */
1637 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1638 { /* not necessary */ }
1639 else
1640 {
1641 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1642 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1643 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1644 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1645 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1646 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1647 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1648 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1649 }
1650
1651# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1652 /*
1653     * Try to do a direct read using the pbMappingR3 pointer.
1654 */
1655 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1656 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1657 {
1658 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1659 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1660 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1661 {
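                /* 15 bytes is the architectural maximum x86 instruction length,
                   so there is no point in exposing more than that beyond the
                   start of the current instruction. */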
1662 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1663 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1664 }
1665 else
1666 {
1667 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1668 Assert(cbInstr < cbMaxRead);
1669 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1670 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1671 }
1672 if (cbDst <= cbMaxRead)
1673 {
1674 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1675 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1676 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1677 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1678 return;
1679 }
1680 pVCpu->iem.s.pbInstrBuf = NULL;
1681
1682 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1683 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1684 }
1685 else
1686# endif
1687#if 0
1688 /*
1689     * If there is no special read handling, we can read a bit more and
1690     * put it in the prefetch buffer.
1691 */
1692 if ( cbDst < cbMaxRead
1693 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1694 {
1695 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1696 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1697 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1698 { /* likely */ }
1699 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1700 {
1701 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1702                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1703 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1704            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1705 }
1706 else
1707 {
1708 Log((RT_SUCCESS(rcStrict)
1709 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1710 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1711                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1712 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1713 }
1714 }
1715 /*
1716 * Special read handling, so only read exactly what's needed.
1717 * This is a highly unlikely scenario.
1718 */
1719 else
1720#endif
1721 {
1722 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1723 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1724 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1725 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1726 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1727 { /* likely */ }
1728 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1729 {
1730 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1731                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1732 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1733 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1734 }
1735 else
1736 {
1737 Log((RT_SUCCESS(rcStrict)
1738 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1739 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1740                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1741 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1742 }
1743 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1744 if (cbToRead == cbDst)
1745 return;
1746 }
1747
1748 /*
1749 * More to read, loop.
1750 */
1751 cbDst -= cbMaxRead;
1752 pvDst = (uint8_t *)pvDst + cbMaxRead;
1753 }
1754#else
1755 RT_NOREF(pvDst, cbDst);
1756 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1757#endif
1758}
1759
1760#else
1761
1762/**
1763 * Tries to fetch at least @a cbMin more opcode bytes, raising the
1764 * appropriate exception if it fails.
1765 *
1766 * @returns Strict VBox status code.
1767 * @param pVCpu The cross context virtual CPU structure of the
1768 * calling thread.
1769 * @param cbMin The minimum number of bytes relative to offOpcode
1770 * that must be read.
1771 */
1772IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1773{
1774 /*
1775 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1776 *
1777 * First translate CS:rIP to a physical address.
1778 */
1779 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1780 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1781 uint32_t cbToTryRead;
1782 RTGCPTR GCPtrNext;
1783 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1784 {
1785 cbToTryRead = PAGE_SIZE;
1786 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1787 if (!IEM_IS_CANONICAL(GCPtrNext))
1788 return iemRaiseGeneralProtectionFault0(pVCpu);
1789 }
1790 else
1791 {
1792 uint32_t GCPtrNext32 = pCtx->eip;
1793 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1794 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1795 if (GCPtrNext32 > pCtx->cs.u32Limit)
1796 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1797 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1798 if (!cbToTryRead) /* overflowed */
1799 {
1800 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1801 cbToTryRead = UINT32_MAX;
1802 /** @todo check out wrapping around the code segment. */
1803 }
1804 if (cbToTryRead < cbMin - cbLeft)
1805 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1806 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1807 }
1808
1809 /* Only read up to the end of the page, and make sure we don't read more
1810 than the opcode buffer can hold. */
1811 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1812 if (cbToTryRead > cbLeftOnPage)
1813 cbToTryRead = cbLeftOnPage;
1814 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1815 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1816/** @todo r=bird: Convert assertion into undefined opcode exception? */
1817 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1818
1819# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1820 /* Allow interpretation of patch manager code blocks since they can for
1821 instance throw #PFs for perfectly good reasons. */
1822 if (pVCpu->iem.s.fInPatchCode)
1823 {
1824 size_t cbRead = 0;
1825 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1826 AssertRCReturn(rc, rc);
1827 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1828 return VINF_SUCCESS;
1829 }
1830# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1831
1832 RTGCPHYS GCPhys;
1833 uint64_t fFlags;
1834 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1835 if (RT_FAILURE(rc))
1836 {
1837 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1838 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1839 }
1840 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1841 {
1842 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1843 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1844 }
1845 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1846 {
1847 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1848 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1849 }
1850 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1851 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1852 /** @todo Check reserved bits and such stuff. PGM is better at doing
1853 * that, so do it when implementing the guest virtual address
1854 * TLB... */
1855
1856 /*
1857 * Read the bytes at this address.
1858 *
1859 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1860 * and since PATM should only patch the start of an instruction there
1861 * should be no need to check again here.
1862 */
1863 if (!pVCpu->iem.s.fBypassHandlers)
1864 {
1865 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1866 cbToTryRead, PGMACCESSORIGIN_IEM);
1867 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1868 { /* likely */ }
1869 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1870 {
1871 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1872                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1873 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1874 }
1875 else
1876 {
1877 Log((RT_SUCCESS(rcStrict)
1878 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1879 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1880                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1881 return rcStrict;
1882 }
1883 }
1884 else
1885 {
1886 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1887 if (RT_SUCCESS(rc))
1888 { /* likely */ }
1889 else
1890 {
1891 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1892 return rc;
1893 }
1894 }
1895 pVCpu->iem.s.cbOpcode += cbToTryRead;
1896 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1897
1898 return VINF_SUCCESS;
1899}
1900
1901#endif /* !IEM_WITH_CODE_TLB */
1902#ifndef IEM_WITH_SETJMP
1903
1904/**
1905 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1906 *
1907 * @returns Strict VBox status code.
1908 * @param pVCpu The cross context virtual CPU structure of the
1909 * calling thread.
1910 * @param pb Where to return the opcode byte.
1911 */
1912DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1913{
1914 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1915 if (rcStrict == VINF_SUCCESS)
1916 {
1917 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1918 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1919 pVCpu->iem.s.offOpcode = offOpcode + 1;
1920 }
1921 else
1922 *pb = 0;
1923 return rcStrict;
1924}
1925
1926
1927/**
1928 * Fetches the next opcode byte.
1929 *
1930 * @returns Strict VBox status code.
1931 * @param pVCpu The cross context virtual CPU structure of the
1932 * calling thread.
1933 * @param pu8 Where to return the opcode byte.
1934 */
1935DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1936{
1937 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1938 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1939 {
1940 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1941 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1942 return VINF_SUCCESS;
1943 }
1944 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1945}
1946
1947#else /* IEM_WITH_SETJMP */
1948
1949/**
1950 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1951 *
1952 * @returns The opcode byte.
1953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1954 */
1955DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1956{
1957# ifdef IEM_WITH_CODE_TLB
1958 uint8_t u8;
1959 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1960 return u8;
1961# else
1962 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1963 if (rcStrict == VINF_SUCCESS)
1964 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1965 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1966# endif
1967}
1968
1969
1970/**
1971 * Fetches the next opcode byte, longjmp on error.
1972 *
1973 * @returns The opcode byte.
1974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1975 */
1976DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1977{
1978# ifdef IEM_WITH_CODE_TLB
1979 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1980 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1981 if (RT_LIKELY( pbBuf != NULL
1982 && offBuf < pVCpu->iem.s.cbInstrBuf))
1983 {
1984 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1985 return pbBuf[offBuf];
1986 }
1987# else
1988 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1989 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1990 {
1991 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1992 return pVCpu->iem.s.abOpcode[offOpcode];
1993 }
1994# endif
1995 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1996}
1997
1998#endif /* IEM_WITH_SETJMP */
1999
2000/**
2001 * Fetches the next opcode byte, returns automatically on failure.
2002 *
2003 * @param a_pu8 Where to return the opcode byte.
2004 * @remark Implicitly references pVCpu.
2005 */
2006#ifndef IEM_WITH_SETJMP
2007# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2008 do \
2009 { \
2010 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2011 if (rcStrict2 == VINF_SUCCESS) \
2012 { /* likely */ } \
2013 else \
2014 return rcStrict2; \
2015 } while (0)
2016#else
2017# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2018#endif /* IEM_WITH_SETJMP */
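
/* Illustrative sketch only (not part of IEM): how a decoder helper typically
   consumes IEM_OPCODE_GET_NEXT_U8.  The iemOp_ExampleImm8 name below is made
   up for this example.  In the non-setjmp build the macro returns the fetch
   status to our caller on failure; in the setjmp build it longjmps instead.

       IEM_STATIC VBOXSTRICTRC iemOp_ExampleImm8(PVMCPU pVCpu)
       {
           uint8_t bImm;
           IEM_OPCODE_GET_NEXT_U8(&bImm);        // returns/longjmps on fetch failure
           Log4(("example: imm8=%#x\n", bImm));  // consume the immediate somehow
           return VINF_SUCCESS;
       }
 */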
2019
2020
2021#ifndef IEM_WITH_SETJMP
2022/**
2023 * Fetches the next signed byte from the opcode stream.
2024 *
2025 * @returns Strict VBox status code.
2026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2027 * @param pi8 Where to return the signed byte.
2028 */
2029DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2030{
2031 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2032}
2033#endif /* !IEM_WITH_SETJMP */
2034
2035
2036/**
2037 * Fetches the next signed byte from the opcode stream, returning automatically
2038 * on failure.
2039 *
2040 * @param a_pi8 Where to return the signed byte.
2041 * @remark Implicitly references pVCpu.
2042 */
2043#ifndef IEM_WITH_SETJMP
2044# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2045 do \
2046 { \
2047 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2048 if (rcStrict2 != VINF_SUCCESS) \
2049 return rcStrict2; \
2050 } while (0)
2051#else /* IEM_WITH_SETJMP */
2052# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2053
2054#endif /* IEM_WITH_SETJMP */
2055
2056#ifndef IEM_WITH_SETJMP
2057
2058/**
2059 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2060 *
2061 * @returns Strict VBox status code.
2062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2063 * @param pu16 Where to return the opcode word (sign-extended byte).
2064 */
2065DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2066{
2067 uint8_t u8;
2068 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2069 if (rcStrict == VINF_SUCCESS)
2070 *pu16 = (int8_t)u8;
2071 return rcStrict;
2072}
2073
2074
2075/**
2076 * Fetches the next signed byte from the opcode stream, extending it to
2077 * unsigned 16-bit.
2078 *
2079 * @returns Strict VBox status code.
2080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2081 * @param pu16 Where to return the unsigned word.
2082 */
2083DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2084{
2085 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2086 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2087 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2088
2089 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2090 pVCpu->iem.s.offOpcode = offOpcode + 1;
2091 return VINF_SUCCESS;
2092}
2093
2094#endif /* !IEM_WITH_SETJMP */
2095
2096/**
2097 * Fetches the next signed byte from the opcode stream, sign-extending it to
2098 * a word, and returns automatically on failure.
2099 *
2100 * @param a_pu16 Where to return the word.
2101 * @remark Implicitly references pVCpu.
2102 */
2103#ifndef IEM_WITH_SETJMP
2104# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2105 do \
2106 { \
2107 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2108 if (rcStrict2 != VINF_SUCCESS) \
2109 return rcStrict2; \
2110 } while (0)
2111#else
2112# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2113#endif
2114
2115#ifndef IEM_WITH_SETJMP
2116
2117/**
2118 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2119 *
2120 * @returns Strict VBox status code.
2121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2122 * @param pu32 Where to return the opcode dword.
2123 */
2124DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2125{
2126 uint8_t u8;
2127 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2128 if (rcStrict == VINF_SUCCESS)
2129 *pu32 = (int8_t)u8;
2130 return rcStrict;
2131}
2132
2133
2134/**
2135 * Fetches the next signed byte from the opcode stream, extending it to
2136 * unsigned 32-bit.
2137 *
2138 * @returns Strict VBox status code.
2139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2140 * @param pu32 Where to return the unsigned dword.
2141 */
2142DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2143{
2144 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2145 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2146 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2147
2148 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2149 pVCpu->iem.s.offOpcode = offOpcode + 1;
2150 return VINF_SUCCESS;
2151}
2152
2153#endif /* !IEM_WITH_SETJMP */
2154
2155/**
2156 * Fetches the next signed byte from the opcode stream, sign-extending it to
2157 * a double word, and returns automatically on failure.
2158 *
2159 * @param a_pu32 Where to return the double word.
2160 * @remark Implicitly references pVCpu.
2161 */
2162#ifndef IEM_WITH_SETJMP
2163# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2164 do \
2165 { \
2166 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2167 if (rcStrict2 != VINF_SUCCESS) \
2168 return rcStrict2; \
2169 } while (0)
2170#else
2171# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2172#endif
2173
2174#ifndef IEM_WITH_SETJMP
2175
2176/**
2177 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2178 *
2179 * @returns Strict VBox status code.
2180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2181 * @param pu64 Where to return the opcode qword.
2182 */
2183DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2184{
2185 uint8_t u8;
2186 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2187 if (rcStrict == VINF_SUCCESS)
2188 *pu64 = (int8_t)u8;
2189 return rcStrict;
2190}
2191
2192
2193/**
2194 * Fetches the next signed byte from the opcode stream, extending it to
2195 * unsigned 64-bit.
2196 *
2197 * @returns Strict VBox status code.
2198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2199 * @param pu64 Where to return the unsigned qword.
2200 */
2201DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2202{
2203 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2204 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2205 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2206
2207 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2208 pVCpu->iem.s.offOpcode = offOpcode + 1;
2209 return VINF_SUCCESS;
2210}
2211
2212#endif /* !IEM_WITH_SETJMP */
2213
2214
2215/**
2216 * Fetches the next signed byte from the opcode stream and sign-extending it to
2217 * Fetches the next signed byte from the opcode stream, sign-extending it to
2218 * a quad word, and returns automatically on failure.
2219 *
2220 * @param a_pu64 Where to return the quad word.
2221 */
2222#ifndef IEM_WITH_SETJMP
2223# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2224 do \
2225 { \
2226 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2227 if (rcStrict2 != VINF_SUCCESS) \
2228 return rcStrict2; \
2229 } while (0)
2230#else
2231# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2232#endif
2233
2234
2235#ifndef IEM_WITH_SETJMP
2236
2237/**
2238 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2239 *
2240 * @returns Strict VBox status code.
2241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2242 * @param pu16 Where to return the opcode word.
2243 */
2244DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2245{
2246 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2247 if (rcStrict == VINF_SUCCESS)
2248 {
2249 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2250# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2251 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2252# else
2253 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2254# endif
2255 pVCpu->iem.s.offOpcode = offOpcode + 2;
2256 }
2257 else
2258 *pu16 = 0;
2259 return rcStrict;
2260}
2261
2262
2263/**
2264 * Fetches the next opcode word.
2265 *
2266 * @returns Strict VBox status code.
2267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2268 * @param pu16 Where to return the opcode word.
2269 */
2270DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2271{
2272 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2273 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2274 {
2275 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2276# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2277 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2278# else
2279 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2280# endif
2281 return VINF_SUCCESS;
2282 }
2283 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2284}
2285
2286#else /* IEM_WITH_SETJMP */
2287
2288/**
2289 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2290 *
2291 * @returns The opcode word.
2292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2293 */
2294DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2295{
2296# ifdef IEM_WITH_CODE_TLB
2297 uint16_t u16;
2298 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2299 return u16;
2300# else
2301 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2302 if (rcStrict == VINF_SUCCESS)
2303 {
2304 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2305 pVCpu->iem.s.offOpcode += 2;
2306# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2307 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2308# else
2309 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2310# endif
2311 }
2312 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2313# endif
2314}
2315
2316
2317/**
2318 * Fetches the next opcode word, longjmp on error.
2319 *
2320 * @returns The opcode word.
2321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2322 */
2323DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2324{
2325# ifdef IEM_WITH_CODE_TLB
2326 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2327 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2328 if (RT_LIKELY( pbBuf != NULL
2329 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2330 {
2331 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2332# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2333 return *(uint16_t const *)&pbBuf[offBuf];
2334# else
2335 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2336# endif
2337 }
2338# else
2339 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2340 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2341 {
2342 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2343# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2344 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2345# else
2346 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2347# endif
2348 }
2349# endif
2350 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2351}
2352
2353#endif /* IEM_WITH_SETJMP */
2354
2355
2356/**
2357 * Fetches the next opcode word, returns automatically on failure.
2358 *
2359 * @param a_pu16 Where to return the opcode word.
2360 * @remark Implicitly references pVCpu.
2361 */
2362#ifndef IEM_WITH_SETJMP
2363# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2364 do \
2365 { \
2366 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2367 if (rcStrict2 != VINF_SUCCESS) \
2368 return rcStrict2; \
2369 } while (0)
2370#else
2371# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2372#endif
2373
2374#ifndef IEM_WITH_SETJMP
2375
2376/**
2377 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2378 *
2379 * @returns Strict VBox status code.
2380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2381 * @param pu32 Where to return the opcode double word.
2382 */
2383DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2384{
2385 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2386 if (rcStrict == VINF_SUCCESS)
2387 {
2388 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2389 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2390 pVCpu->iem.s.offOpcode = offOpcode + 2;
2391 }
2392 else
2393 *pu32 = 0;
2394 return rcStrict;
2395}
2396
2397
2398/**
2399 * Fetches the next opcode word, zero extending it to a double word.
2400 *
2401 * @returns Strict VBox status code.
2402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2403 * @param pu32 Where to return the opcode double word.
2404 */
2405DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2406{
2407 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2408 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2409 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2410
2411 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2412 pVCpu->iem.s.offOpcode = offOpcode + 2;
2413 return VINF_SUCCESS;
2414}
2415
2416#endif /* !IEM_WITH_SETJMP */
2417
2418
2419/**
2420 * Fetches the next opcode word and zero extends it to a double word, returns
2421 * automatically on failure.
2422 *
2423 * @param a_pu32 Where to return the opcode double word.
2424 * @remark Implicitly references pVCpu.
2425 */
2426#ifndef IEM_WITH_SETJMP
2427# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2428 do \
2429 { \
2430 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2431 if (rcStrict2 != VINF_SUCCESS) \
2432 return rcStrict2; \
2433 } while (0)
2434#else
2435# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2436#endif
2437
2438#ifndef IEM_WITH_SETJMP
2439
2440/**
2441 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2442 *
2443 * @returns Strict VBox status code.
2444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2445 * @param pu64 Where to return the opcode quad word.
2446 */
2447DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2448{
2449 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2450 if (rcStrict == VINF_SUCCESS)
2451 {
2452 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2453 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2454 pVCpu->iem.s.offOpcode = offOpcode + 2;
2455 }
2456 else
2457 *pu64 = 0;
2458 return rcStrict;
2459}
2460
2461
2462/**
2463 * Fetches the next opcode word, zero extending it to a quad word.
2464 *
2465 * @returns Strict VBox status code.
2466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2467 * @param pu64 Where to return the opcode quad word.
2468 */
2469DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2470{
2471 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2472 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2473 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2474
2475 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2476 pVCpu->iem.s.offOpcode = offOpcode + 2;
2477 return VINF_SUCCESS;
2478}
2479
2480#endif /* !IEM_WITH_SETJMP */
2481
2482/**
2483 * Fetches the next opcode word and zero extends it to a quad word, returns
2484 * automatically on failure.
2485 *
2486 * @param a_pu64 Where to return the opcode quad word.
2487 * @remark Implicitly references pVCpu.
2488 */
2489#ifndef IEM_WITH_SETJMP
2490# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2491 do \
2492 { \
2493 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2494 if (rcStrict2 != VINF_SUCCESS) \
2495 return rcStrict2; \
2496 } while (0)
2497#else
2498# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2499#endif
2500
2501
2502#ifndef IEM_WITH_SETJMP
2503/**
2504 * Fetches the next signed word from the opcode stream.
2505 *
2506 * @returns Strict VBox status code.
2507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2508 * @param pi16 Where to return the signed word.
2509 */
2510DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2511{
2512 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2513}
2514#endif /* !IEM_WITH_SETJMP */
2515
2516
2517/**
2518 * Fetches the next signed word from the opcode stream, returning automatically
2519 * on failure.
2520 *
2521 * @param a_pi16 Where to return the signed word.
2522 * @remark Implicitly references pVCpu.
2523 */
2524#ifndef IEM_WITH_SETJMP
2525# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2526 do \
2527 { \
2528 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2529 if (rcStrict2 != VINF_SUCCESS) \
2530 return rcStrict2; \
2531 } while (0)
2532#else
2533# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2534#endif
2535
2536#ifndef IEM_WITH_SETJMP
2537
2538/**
2539 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2540 *
2541 * @returns Strict VBox status code.
2542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2543 * @param pu32 Where to return the opcode dword.
2544 */
2545DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2546{
2547 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2548 if (rcStrict == VINF_SUCCESS)
2549 {
2550 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2551# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2552 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2553# else
2554 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2555 pVCpu->iem.s.abOpcode[offOpcode + 1],
2556 pVCpu->iem.s.abOpcode[offOpcode + 2],
2557 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2558# endif
2559 pVCpu->iem.s.offOpcode = offOpcode + 4;
2560 }
2561 else
2562 *pu32 = 0;
2563 return rcStrict;
2564}
2565
2566
2567/**
2568 * Fetches the next opcode dword.
2569 *
2570 * @returns Strict VBox status code.
2571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2572 * @param pu32 Where to return the opcode double word.
2573 */
2574DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2575{
2576 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2577 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2578 {
2579 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2580# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2581 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2582# else
2583 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2584 pVCpu->iem.s.abOpcode[offOpcode + 1],
2585 pVCpu->iem.s.abOpcode[offOpcode + 2],
2586 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2587# endif
2588 return VINF_SUCCESS;
2589 }
2590 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2591}
2592
2593#else /* IEM_WITH_SETJMP */
2594
2595/**
2596 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2597 *
2598 * @returns The opcode dword.
2599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2600 */
2601DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2602{
2603# ifdef IEM_WITH_CODE_TLB
2604 uint32_t u32;
2605 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2606 return u32;
2607# else
2608 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2609 if (rcStrict == VINF_SUCCESS)
2610 {
2611 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2612 pVCpu->iem.s.offOpcode = offOpcode + 4;
2613# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2614 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2615# else
2616 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2617 pVCpu->iem.s.abOpcode[offOpcode + 1],
2618 pVCpu->iem.s.abOpcode[offOpcode + 2],
2619 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2620# endif
2621 }
2622 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2623# endif
2624}
2625
2626
2627/**
2628 * Fetches the next opcode dword, longjmp on error.
2629 *
2630 * @returns The opcode dword.
2631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2632 */
2633DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2634{
2635# ifdef IEM_WITH_CODE_TLB
2636 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2637 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2638 if (RT_LIKELY( pbBuf != NULL
2639 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2640 {
2641 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2642# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2643 return *(uint32_t const *)&pbBuf[offBuf];
2644# else
2645 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2646 pbBuf[offBuf + 1],
2647 pbBuf[offBuf + 2],
2648 pbBuf[offBuf + 3]);
2649# endif
2650 }
2651# else
2652 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2653 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2654 {
2655 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2657 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2658# else
2659 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2660 pVCpu->iem.s.abOpcode[offOpcode + 1],
2661 pVCpu->iem.s.abOpcode[offOpcode + 2],
2662 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2663# endif
2664 }
2665# endif
2666 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2667}
2668
2669#endif /* IEM_WITH_SETJMP */
2670
2671
2672/**
2673 * Fetches the next opcode dword, returns automatically on failure.
2674 *
2675 * @param a_pu32 Where to return the opcode dword.
2676 * @remark Implicitly references pVCpu.
2677 */
2678#ifndef IEM_WITH_SETJMP
2679# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2680 do \
2681 { \
2682 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2683 if (rcStrict2 != VINF_SUCCESS) \
2684 return rcStrict2; \
2685 } while (0)
2686#else
2687# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2688#endif
2689
2690#ifndef IEM_WITH_SETJMP
2691
2692/**
2693 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2694 *
2695 * @returns Strict VBox status code.
2696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2697 * @param pu64 Where to return the opcode quad word.
2698 */
2699DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2700{
2701 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2702 if (rcStrict == VINF_SUCCESS)
2703 {
2704 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2705 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2706 pVCpu->iem.s.abOpcode[offOpcode + 1],
2707 pVCpu->iem.s.abOpcode[offOpcode + 2],
2708 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2709 pVCpu->iem.s.offOpcode = offOpcode + 4;
2710 }
2711 else
2712 *pu64 = 0;
2713 return rcStrict;
2714}
2715
2716
2717/**
2718 * Fetches the next opcode dword, zero extending it to a quad word.
2719 *
2720 * @returns Strict VBox status code.
2721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2722 * @param pu64 Where to return the opcode quad word.
2723 */
2724DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2725{
2726 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2727 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2728 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2729
2730 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2731 pVCpu->iem.s.abOpcode[offOpcode + 1],
2732 pVCpu->iem.s.abOpcode[offOpcode + 2],
2733 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2734 pVCpu->iem.s.offOpcode = offOpcode + 4;
2735 return VINF_SUCCESS;
2736}
2737
2738#endif /* !IEM_WITH_SETJMP */
2739
2740
2741/**
2742 * Fetches the next opcode dword and zero extends it to a quad word, returns
2743 * automatically on failure.
2744 *
2745 * @param a_pu64 Where to return the opcode quad word.
2746 * @remark Implicitly references pVCpu.
2747 */
2748#ifndef IEM_WITH_SETJMP
2749# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2750 do \
2751 { \
2752 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2753 if (rcStrict2 != VINF_SUCCESS) \
2754 return rcStrict2; \
2755 } while (0)
2756#else
2757# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2758#endif
2759
2760
2761#ifndef IEM_WITH_SETJMP
2762/**
2763 * Fetches the next signed double word from the opcode stream.
2764 *
2765 * @returns Strict VBox status code.
2766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2767 * @param pi32 Where to return the signed double word.
2768 */
2769DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2770{
2771 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2772}
2773#endif
2774
2775/**
2776 * Fetches the next signed double word from the opcode stream, returning
2777 * automatically on failure.
2778 *
2779 * @param a_pi32 Where to return the signed double word.
2780 * @remark Implicitly references pVCpu.
2781 */
2782#ifndef IEM_WITH_SETJMP
2783# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2784 do \
2785 { \
2786 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2787 if (rcStrict2 != VINF_SUCCESS) \
2788 return rcStrict2; \
2789 } while (0)
2790#else
2791# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2792#endif
2793
2794#ifndef IEM_WITH_SETJMP
2795
2796/**
2797 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2798 *
2799 * @returns Strict VBox status code.
2800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2801 * @param pu64 Where to return the opcode qword.
2802 */
2803DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2804{
2805 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2806 if (rcStrict == VINF_SUCCESS)
2807 {
2808 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2809 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2810 pVCpu->iem.s.abOpcode[offOpcode + 1],
2811 pVCpu->iem.s.abOpcode[offOpcode + 2],
2812 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2813 pVCpu->iem.s.offOpcode = offOpcode + 4;
2814 }
2815 else
2816 *pu64 = 0;
2817 return rcStrict;
2818}
2819
2820
2821/**
2822 * Fetches the next opcode dword, sign extending it into a quad word.
2823 *
2824 * @returns Strict VBox status code.
2825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2826 * @param pu64 Where to return the opcode quad word.
2827 */
2828DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2829{
2830 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2831 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2832 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2833
2834 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2835 pVCpu->iem.s.abOpcode[offOpcode + 1],
2836 pVCpu->iem.s.abOpcode[offOpcode + 2],
2837 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2838 *pu64 = i32;
2839 pVCpu->iem.s.offOpcode = offOpcode + 4;
2840 return VINF_SUCCESS;
2841}
2842
2843#endif /* !IEM_WITH_SETJMP */
2844
2845
2846/**
2847 * Fetches the next opcode double word and sign extends it to a quad word,
2848 * returns automatically on failure.
2849 *
2850 * @param a_pu64 Where to return the opcode quad word.
2851 * @remark Implicitly references pVCpu.
2852 */
2853#ifndef IEM_WITH_SETJMP
2854# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2855 do \
2856 { \
2857 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2858 if (rcStrict2 != VINF_SUCCESS) \
2859 return rcStrict2; \
2860 } while (0)
2861#else
2862# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2863#endif
2864
2865#ifndef IEM_WITH_SETJMP
2866
2867/**
2868 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2869 *
2870 * @returns Strict VBox status code.
2871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2872 * @param pu64 Where to return the opcode qword.
2873 */
2874DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2875{
2876 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2877 if (rcStrict == VINF_SUCCESS)
2878 {
2879 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2880# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2881 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2882# else
2883 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2884 pVCpu->iem.s.abOpcode[offOpcode + 1],
2885 pVCpu->iem.s.abOpcode[offOpcode + 2],
2886 pVCpu->iem.s.abOpcode[offOpcode + 3],
2887 pVCpu->iem.s.abOpcode[offOpcode + 4],
2888 pVCpu->iem.s.abOpcode[offOpcode + 5],
2889 pVCpu->iem.s.abOpcode[offOpcode + 6],
2890 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2891# endif
2892 pVCpu->iem.s.offOpcode = offOpcode + 8;
2893 }
2894 else
2895 *pu64 = 0;
2896 return rcStrict;
2897}
2898
2899
2900/**
2901 * Fetches the next opcode qword.
2902 *
2903 * @returns Strict VBox status code.
2904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2905 * @param pu64 Where to return the opcode qword.
2906 */
2907DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2908{
2909 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2910 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2911 {
2912# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2913 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2914# else
2915 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2916 pVCpu->iem.s.abOpcode[offOpcode + 1],
2917 pVCpu->iem.s.abOpcode[offOpcode + 2],
2918 pVCpu->iem.s.abOpcode[offOpcode + 3],
2919 pVCpu->iem.s.abOpcode[offOpcode + 4],
2920 pVCpu->iem.s.abOpcode[offOpcode + 5],
2921 pVCpu->iem.s.abOpcode[offOpcode + 6],
2922 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2923# endif
2924 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2925 return VINF_SUCCESS;
2926 }
2927 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2928}
2929
2930#else /* IEM_WITH_SETJMP */
2931
2932/**
2933 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2934 *
2935 * @returns The opcode qword.
2936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2937 */
2938DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2939{
2940# ifdef IEM_WITH_CODE_TLB
2941 uint64_t u64;
2942 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2943 return u64;
2944# else
2945 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2946 if (rcStrict == VINF_SUCCESS)
2947 {
2948 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2949 pVCpu->iem.s.offOpcode = offOpcode + 8;
2950# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2951 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2952# else
2953 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2954 pVCpu->iem.s.abOpcode[offOpcode + 1],
2955 pVCpu->iem.s.abOpcode[offOpcode + 2],
2956 pVCpu->iem.s.abOpcode[offOpcode + 3],
2957 pVCpu->iem.s.abOpcode[offOpcode + 4],
2958 pVCpu->iem.s.abOpcode[offOpcode + 5],
2959 pVCpu->iem.s.abOpcode[offOpcode + 6],
2960 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2961# endif
2962 }
2963 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2964# endif
2965}
2966
2967
2968/**
2969 * Fetches the next opcode qword, longjmp on error.
2970 *
2971 * @returns The opcode qword.
2972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2973 */
2974DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2975{
2976# ifdef IEM_WITH_CODE_TLB
2977 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2978 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2979 if (RT_LIKELY( pbBuf != NULL
2980 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2981 {
2982 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2983# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2984 return *(uint64_t const *)&pbBuf[offBuf];
2985# else
2986 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2987 pbBuf[offBuf + 1],
2988 pbBuf[offBuf + 2],
2989 pbBuf[offBuf + 3],
2990 pbBuf[offBuf + 4],
2991 pbBuf[offBuf + 5],
2992 pbBuf[offBuf + 6],
2993 pbBuf[offBuf + 7]);
2994# endif
2995 }
2996# else
2997 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2998 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2999 {
3000 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3001# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3002 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3003# else
3004 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3005 pVCpu->iem.s.abOpcode[offOpcode + 1],
3006 pVCpu->iem.s.abOpcode[offOpcode + 2],
3007 pVCpu->iem.s.abOpcode[offOpcode + 3],
3008 pVCpu->iem.s.abOpcode[offOpcode + 4],
3009 pVCpu->iem.s.abOpcode[offOpcode + 5],
3010 pVCpu->iem.s.abOpcode[offOpcode + 6],
3011 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3012# endif
3013 }
3014# endif
3015 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3016}
3017
3018#endif /* IEM_WITH_SETJMP */
3019
3020/**
3021 * Fetches the next opcode quad word, returns automatically on failure.
3022 *
3023 * @param a_pu64 Where to return the opcode quad word.
3024 * @remark Implicitly references pVCpu.
3025 */
3026#ifndef IEM_WITH_SETJMP
3027# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3028 do \
3029 { \
3030 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3031 if (rcStrict2 != VINF_SUCCESS) \
3032 return rcStrict2; \
3033 } while (0)
3034#else
3035# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3036#endif
3037
3038
3039/** @name Misc Worker Functions.
3040 * @{
3041 */
3042
3043
3044/**
3045 * Validates a new SS segment.
3046 *
3047 * @returns VBox strict status code.
3048 * @param pVCpu The cross context virtual CPU structure of the
3049 * calling thread.
3050 * @param pCtx The CPU context.
3051 * @param NewSS The new SS selector.
3052 * @param uCpl The CPL to load the stack for.
3053 * @param pDesc Where to return the descriptor.
3054 */
3055IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3056{
3057 NOREF(pCtx);
3058
3059 /* Null selectors are not allowed (we're not called for dispatching
3060 interrupts with SS=0 in long mode). */
3061 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3062 {
3063 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3064 return iemRaiseTaskSwitchFault0(pVCpu);
3065 }
3066
3067 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3068 if ((NewSS & X86_SEL_RPL) != uCpl)
3069 {
3070 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3071 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3072 }
3073
3074 /*
3075 * Read the descriptor.
3076 */
3077 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3078 if (rcStrict != VINF_SUCCESS)
3079 return rcStrict;
3080
3081 /*
3082 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3083 */
3084 if (!pDesc->Legacy.Gen.u1DescType)
3085 {
3086 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3087 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3088 }
3089
3090 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3091 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3092 {
3093 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3094 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3095 }
3096 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3097 {
3098 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3099 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3100 }
3101
3102 /* Is it there? */
3103 /** @todo testcase: Is this checked before the canonical / limit check below? */
3104 if (!pDesc->Legacy.Gen.u1Present)
3105 {
3106 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3107 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3108 }
3109
3110 return VINF_SUCCESS;
3111}
3112
3113
3114/**
3115 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3116 * not.
3117 *
3118 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3119 * @param a_pCtx The CPU context.
3120 */
3121#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3122# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3123 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3124 ? (a_pCtx)->eflags.u \
3125 : CPUMRawGetEFlags(a_pVCpu) )
3126#else
3127# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3128 ( (a_pCtx)->eflags.u )
3129#endif
3130
3131/**
3132 * Updates the EFLAGS in the correct manner wrt. PATM.
3133 *
3134 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3135 * @param a_pCtx The CPU context.
3136 * @param a_fEfl The new EFLAGS.
3137 */
3138#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3139# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3140 do { \
3141 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3142 (a_pCtx)->eflags.u = (a_fEfl); \
3143 else \
3144 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3145 } while (0)
3146#else
3147# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3148 do { \
3149 (a_pCtx)->eflags.u = (a_fEfl); \
3150 } while (0)
3151#endif
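
/* Illustrative sketch only: the two macros above are intended as a PATM-aware
   read-modify-write pair.  The flag picked here is arbitrary; the point is the
   get/modify/set pattern:

       uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);   // raw-mode aware read
       fEfl |= X86_EFL_CF;                             // tweak some flag
       IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);             // raw-mode aware write-back
 */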
3152
3153
3154/** @} */
3155
3156/** @name Raising Exceptions.
3157 *
3158 * @{
3159 */
3160
3161/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3162 * @{ */
3163/** CPU exception. */
3164#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3165/** External interrupt (from PIC, APIC, whatever). */
3166#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3167/** Software interrupt (int or into, not bound).
3168 * Returns to the following instruction. */
3169#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3170/** Takes an error code. */
3171#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3172/** Takes a CR2. */
3173#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3174/** Generated by the breakpoint instruction. */
3175#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3176/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3177#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3178/** @} */
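/* Illustrative example (not part of the original source): callers of
 * iemRaiseXcptOrInt combine these bits to describe the event being raised.
 * A CPU exception that supplies both an error code and a faulting address,
 * \#PF for instance, would be raised with something along the lines of
 * @code
 *      uint32_t fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2;
 * @endcode
 * whereas a software INT n uses IEM_XCPT_FLAGS_T_SOFT_INT so that the pushed
 * return address points at the instruction following the INT (see the cbInstr
 * handling in the real and protected mode raisers below). */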
3179
3180
3181/**
3182 * Loads the specified stack far pointer from the TSS.
3183 *
3184 * @returns VBox strict status code.
3185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3186 * @param pCtx The CPU context.
3187 * @param uCpl The CPL to load the stack for.
3188 * @param pSelSS Where to return the new stack segment.
3189 * @param puEsp Where to return the new stack pointer.
3190 */
3191IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3192 PRTSEL pSelSS, uint32_t *puEsp)
3193{
3194 VBOXSTRICTRC rcStrict;
3195 Assert(uCpl < 4);
3196
3197 switch (pCtx->tr.Attr.n.u4Type)
3198 {
3199 /*
3200 * 16-bit TSS (X86TSS16).
3201 */
3202 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
3203 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3204 {
3205 uint32_t off = uCpl * 4 + 2;
3206 if (off + 4 <= pCtx->tr.u32Limit)
3207 {
3208 /** @todo check actual access pattern here. */
3209 uint32_t u32Tmp = 0; /* gcc maybe... */
3210 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3211 if (rcStrict == VINF_SUCCESS)
3212 {
3213 *puEsp = RT_LOWORD(u32Tmp);
3214 *pSelSS = RT_HIWORD(u32Tmp);
3215 return VINF_SUCCESS;
3216 }
3217 }
3218 else
3219 {
3220 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3221 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3222 }
3223 break;
3224 }
3225
3226 /*
3227 * 32-bit TSS (X86TSS32).
3228 */
3229 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
3230 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3231 {
3232 uint32_t off = uCpl * 8 + 4;
3233 if (off + 7 <= pCtx->tr.u32Limit)
3234 {
3235/** @todo check actual access pattern here. */
3236 uint64_t u64Tmp;
3237 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3238 if (rcStrict == VINF_SUCCESS)
3239 {
3240 *puEsp = u64Tmp & UINT32_MAX;
3241 *pSelSS = (RTSEL)(u64Tmp >> 32);
3242 return VINF_SUCCESS;
3243 }
3244 }
3245 else
3246 {
3247 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3248 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3249 }
3250 break;
3251 }
3252
3253 default:
3254 AssertFailed();
3255 rcStrict = VERR_IEM_IPE_4;
3256 break;
3257 }
3258
3259 *puEsp = 0; /* make gcc happy */
3260 *pSelSS = 0; /* make gcc happy */
3261 return rcStrict;
3262}
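/* Simplified model (illustrative only; helper names are made up) of the offset
 * math used above: the ring-N stack pointers sit at fixed offsets in the two
 * legacy TSS layouts.
 * @code
 *      #include <stdint.h>
 *      static uint32_t tss16StackFieldOff(uint8_t uCpl) { return uCpl * 4u + 2u; } // sp0 at 2, sp1 at 6, sp2 at 10; ss word follows each
 *      static uint32_t tss32StackFieldOff(uint8_t uCpl) { return uCpl * 8u + 4u; } // esp0 at 4, esp1 at 12, esp2 at 20; ss dword follows each
 * @endcode
 * The 16-bit entry is fetched as a single 32-bit value with SP in the low word
 * and SS in the high word, which is what the RT_LOWORD/RT_HIWORD split above
 * relies on. */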
3263
3264
3265/**
3266 * Loads the specified stack pointer from the 64-bit TSS.
3267 *
3268 * @returns VBox strict status code.
3269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3270 * @param pCtx The CPU context.
3271 * @param uCpl The CPL to load the stack for.
3272 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3273 * @param puRsp Where to return the new stack pointer.
3274 */
3275IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3276{
3277 Assert(uCpl < 4);
3278 Assert(uIst < 8);
3279 *puRsp = 0; /* make gcc happy */
3280
3281 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3282
3283 uint32_t off;
3284 if (uIst)
3285 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3286 else
3287 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3288 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3289 {
3290 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3291 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3292 }
3293
3294 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3295}
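/* Simplified model (illustrative only; MYTSS64 is a made-up stand-in for
 * X86TSS64) of the layout the function above indexes into.  The offsets match
 * the architectural 64-bit TSS: rsp0..rsp2 start at byte 4, ist1..ist7 at
 * byte 36.
 * @code
 *      #include <stddef.h>
 *      #include <stdint.h>
 *      #pragma pack(1)
 *      typedef struct MYTSS64
 *      {
 *          uint32_t u32Reserved0;  // 0x00
 *          uint64_t rsp[3];        // 0x04: rsp0..rsp2
 *          uint64_t u64Reserved1;  // 0x1c
 *          uint64_t ist[7];        // 0x24: ist1..ist7
 *      } MYTSS64;
 *      #pragma pack()
 *
 *      static uint32_t tss64StackFieldOff(uint8_t uCpl, uint8_t uIst)
 *      {
 *          return uIst
 *               ? (uint32_t)(offsetof(MYTSS64, ist) + (uIst - 1) * sizeof(uint64_t))
 *               : (uint32_t)(offsetof(MYTSS64, rsp) + uCpl       * sizeof(uint64_t));
 *      }
 * @endcode
 */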
3296
3297
3298/**
3299 * Adjust the CPU state according to the exception being raised.
3300 *
3301 * @param pCtx The CPU context.
3302 * @param u8Vector The exception that has been raised.
3303 */
3304DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3305{
3306 switch (u8Vector)
3307 {
3308 case X86_XCPT_DB:
3309 pCtx->dr[7] &= ~X86_DR7_GD;
3310 break;
3311 /** @todo Read the AMD and Intel exception reference... */
3312 }
3313}
3314
3315
3316/**
3317 * Implements exceptions and interrupts for real mode.
3318 *
3319 * @returns VBox strict status code.
3320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3321 * @param pCtx The CPU context.
3322 * @param cbInstr The number of bytes to offset rIP by in the return
3323 * address.
3324 * @param u8Vector The interrupt / exception vector number.
3325 * @param fFlags The flags.
3326 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3327 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3328 */
3329IEM_STATIC VBOXSTRICTRC
3330iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3331 PCPUMCTX pCtx,
3332 uint8_t cbInstr,
3333 uint8_t u8Vector,
3334 uint32_t fFlags,
3335 uint16_t uErr,
3336 uint64_t uCr2)
3337{
3338 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3339 NOREF(uErr); NOREF(uCr2);
3340
3341 /*
3342 * Read the IDT entry.
3343 */
3344 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3345 {
3346 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3347 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3348 }
3349 RTFAR16 Idte;
3350 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3351 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3352 return rcStrict;
3353
3354 /*
3355 * Push the stack frame.
3356 */
3357 uint16_t *pu16Frame;
3358 uint64_t uNewRsp;
3359 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3360 if (rcStrict != VINF_SUCCESS)
3361 return rcStrict;
3362
3363 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3364#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3365 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3366 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3367 fEfl |= UINT16_C(0xf000);
3368#endif
3369 pu16Frame[2] = (uint16_t)fEfl;
3370 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3371 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3372 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3373 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3374 return rcStrict;
3375
3376 /*
3377 * Load the vector address into cs:ip and make exception specific state
3378 * adjustments.
3379 */
3380 pCtx->cs.Sel = Idte.sel;
3381 pCtx->cs.ValidSel = Idte.sel;
3382 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3383 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3384 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3385 pCtx->rip = Idte.off;
3386 fEfl &= ~X86_EFL_IF;
3387 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3388
3389 /** @todo do we actually do this in real mode? */
3390 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3391 iemRaiseXcptAdjustState(pCtx, u8Vector);
3392
3393 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3394}
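/* Illustrative recap (not part of the original source) of the real-mode flow
 * above: the vector table entry is a 4-byte far pointer at idtr.base + 4*N
 * (offset word first, then segment word), and the 6-byte frame ends up in
 * memory lowest address first as IP, CS, FLAGS, i.e. pu16Frame[0]=IP,
 * [1]=CS, [2]=FLAGS as written above.
 * @code
 *      #include <stdint.h>
 *      static uint32_t ivtEntryAddr(uint32_t uIdtBase, uint8_t bVector) // uIdtBase is 0 after reset
 *      {
 *          return uIdtBase + 4u * bVector;
 *      }
 * @endcode
 */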
3395
3396
3397/**
3398 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3399 *
3400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3401 * @param pSReg Pointer to the segment register.
3402 */
3403IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3404{
3405 pSReg->Sel = 0;
3406 pSReg->ValidSel = 0;
3407 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3408 {
3409 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3410 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3411 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3412 }
3413 else
3414 {
3415 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3416 /** @todo check this on AMD-V */
3417 pSReg->u64Base = 0;
3418 pSReg->u32Limit = 0;
3419 }
3420}
3421
3422
3423/**
3424 * Loads a segment selector during a task switch in V8086 mode.
3425 *
3426 * @param pSReg Pointer to the segment register.
3427 * @param uSel The selector value to load.
3428 */
3429IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3430{
3431 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3432 pSReg->Sel = uSel;
3433 pSReg->ValidSel = uSel;
3434 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3435 pSReg->u64Base = uSel << 4;
3436 pSReg->u32Limit = 0xffff;
3437 pSReg->Attr.u = 0xf3;
3438}
3439
3440
3441/**
3442 * Loads a NULL data selector into a selector register, both the hidden and
3443 * visible parts, in protected mode.
3444 *
3445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3446 * @param pSReg Pointer to the segment register.
3447 * @param uRpl The RPL.
3448 */
3449IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3450{
3451 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3452 * data selector in protected mode. */
3453 pSReg->Sel = uRpl;
3454 pSReg->ValidSel = uRpl;
3455 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3456 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3457 {
3458 /* VT-x (Intel 3960x) observed doing something like this. */
3459 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3460 pSReg->u32Limit = UINT32_MAX;
3461 pSReg->u64Base = 0;
3462 }
3463 else
3464 {
3465 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3466 pSReg->u32Limit = 0;
3467 pSReg->u64Base = 0;
3468 }
3469}
3470
3471
3472/**
3473 * Loads a segment selector during a task switch in protected mode.
3474 *
3475 * In this task switch scenario, we would throw \#TS exceptions rather than
3476 * \#GPs.
3477 *
3478 * @returns VBox strict status code.
3479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3480 * @param pSReg Pointer to the segment register.
3481 * @param uSel The new selector value.
3482 *
3483 * @remarks This does _not_ handle CS or SS.
3484 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3485 */
3486IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3487{
3488 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3489
3490 /* Null data selector. */
3491 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3492 {
3493 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3494 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3495 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3496 return VINF_SUCCESS;
3497 }
3498
3499 /* Fetch the descriptor. */
3500 IEMSELDESC Desc;
3501 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3502 if (rcStrict != VINF_SUCCESS)
3503 {
3504 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3505 VBOXSTRICTRC_VAL(rcStrict)));
3506 return rcStrict;
3507 }
3508
3509 /* Must be a data segment or readable code segment. */
3510 if ( !Desc.Legacy.Gen.u1DescType
3511 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3512 {
3513 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3514 Desc.Legacy.Gen.u4Type));
3515 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3516 }
3517
3518 /* Check privileges for data segments and non-conforming code segments. */
3519 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3520 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3521 {
3522 /* The RPL and the new CPL must be less than or equal to the DPL. */
3523 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3524 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3525 {
3526 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3527 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3528 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3529 }
3530 }
3531
3532 /* Is it there? */
3533 if (!Desc.Legacy.Gen.u1Present)
3534 {
3535 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3536 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3537 }
3538
3539 /* The base and limit. */
3540 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3541 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3542
3543 /*
3544 * Ok, everything checked out fine. Now set the accessed bit before
3545 * committing the result into the registers.
3546 */
3547 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3548 {
3549 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3550 if (rcStrict != VINF_SUCCESS)
3551 return rcStrict;
3552 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3553 }
3554
3555 /* Commit */
3556 pSReg->Sel = uSel;
3557 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3558 pSReg->u32Limit = cbLimit;
3559 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3560 pSReg->ValidSel = uSel;
3561 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3562 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3563 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3564
3565 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3566 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3567 return VINF_SUCCESS;
3568}
3569
3570
3571/**
3572 * Performs a task switch.
3573 *
3574 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3575 * caller is responsible for performing the necessary checks (like DPL, TSS
3576 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3577 * reference for JMP, CALL, IRET.
3578 *
3579 * If the task switch is due to a software interrupt or hardware exception,
3580 * the caller is responsible for validating the TSS selector and descriptor. See
3581 * Intel Instruction reference for INT n.
3582 *
3583 * @returns VBox strict status code.
3584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3585 * @param pCtx The CPU context.
3586 * @param enmTaskSwitch What caused this task switch.
3587 * @param uNextEip The EIP effective after the task switch.
3588 * @param fFlags The flags.
3589 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3590 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3591 * @param SelTSS The TSS selector of the new task.
3592 * @param pNewDescTSS Pointer to the new TSS descriptor.
3593 */
3594IEM_STATIC VBOXSTRICTRC
3595iemTaskSwitch(PVMCPU pVCpu,
3596 PCPUMCTX pCtx,
3597 IEMTASKSWITCH enmTaskSwitch,
3598 uint32_t uNextEip,
3599 uint32_t fFlags,
3600 uint16_t uErr,
3601 uint64_t uCr2,
3602 RTSEL SelTSS,
3603 PIEMSELDESC pNewDescTSS)
3604{
3605 Assert(!IEM_IS_REAL_MODE(pVCpu));
3606 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3607
3608 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3609 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3610 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3611 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3612 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3613
3614 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3615 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3616
3617 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3618 fIsNewTSS386, pCtx->eip, uNextEip));
3619
3620 /* Update CR2 in case it's a page-fault. */
3621 /** @todo This should probably be done much earlier in IEM/PGM. See
3622 * @bugref{5653#c49}. */
3623 if (fFlags & IEM_XCPT_FLAGS_CR2)
3624 pCtx->cr2 = uCr2;
3625
3626 /*
3627 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3628 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3629 */
3630 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3631 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3632 if (uNewTSSLimit < uNewTSSLimitMin)
3633 {
3634 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3635 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3636 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3637 }
3638
3639 /*
3640 * Check the current TSS limit. The last written byte to the current TSS during the
3641 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3642 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3643 *
3644 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3645 * end up with smaller than "legal" TSS limits.
3646 */
3647 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3648 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3649 if (uCurTSSLimit < uCurTSSLimitMin)
3650 {
3651 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3652 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3653 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3654 }
3655
3656 /*
3657 * Verify that the new TSS can be accessed and map it. Map only the required contents
3658 * and not the entire TSS.
3659 */
3660 void *pvNewTSS;
3661 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3662 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3663 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3664 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3665 * not perform correct translation if this happens. See Intel spec. 7.2.1
3666 * "Task-State Segment" */
3667 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3668 if (rcStrict != VINF_SUCCESS)
3669 {
3670 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3671 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3672 return rcStrict;
3673 }
3674
3675 /*
3676 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3677 */
3678 uint32_t u32EFlags = pCtx->eflags.u32;
3679 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3680 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3681 {
3682 PX86DESC pDescCurTSS;
3683 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3684 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3685 if (rcStrict != VINF_SUCCESS)
3686 {
3687 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3688 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3689 return rcStrict;
3690 }
3691
3692 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3693 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3694 if (rcStrict != VINF_SUCCESS)
3695 {
3696 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3697 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3698 return rcStrict;
3699 }
3700
3701 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3702 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3703 {
3704 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3705 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3706 u32EFlags &= ~X86_EFL_NT;
3707 }
3708 }
3709
3710 /*
3711 * Save the CPU state into the current TSS.
3712 */
3713 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3714 if (GCPtrNewTSS == GCPtrCurTSS)
3715 {
3716 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3717 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3718 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3719 }
3720 if (fIsNewTSS386)
3721 {
3722 /*
3723 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3724 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3725 */
3726 void *pvCurTSS32;
3727 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3728 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3729 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3730 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3731 if (rcStrict != VINF_SUCCESS)
3732 {
3733 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3734 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3735 return rcStrict;
3736 }
3737
3738 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
3739 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3740 pCurTSS32->eip = uNextEip;
3741 pCurTSS32->eflags = u32EFlags;
3742 pCurTSS32->eax = pCtx->eax;
3743 pCurTSS32->ecx = pCtx->ecx;
3744 pCurTSS32->edx = pCtx->edx;
3745 pCurTSS32->ebx = pCtx->ebx;
3746 pCurTSS32->esp = pCtx->esp;
3747 pCurTSS32->ebp = pCtx->ebp;
3748 pCurTSS32->esi = pCtx->esi;
3749 pCurTSS32->edi = pCtx->edi;
3750 pCurTSS32->es = pCtx->es.Sel;
3751 pCurTSS32->cs = pCtx->cs.Sel;
3752 pCurTSS32->ss = pCtx->ss.Sel;
3753 pCurTSS32->ds = pCtx->ds.Sel;
3754 pCurTSS32->fs = pCtx->fs.Sel;
3755 pCurTSS32->gs = pCtx->gs.Sel;
3756
3757 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3758 if (rcStrict != VINF_SUCCESS)
3759 {
3760 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3761 VBOXSTRICTRC_VAL(rcStrict)));
3762 return rcStrict;
3763 }
3764 }
3765 else
3766 {
3767 /*
3768 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3769 */
3770 void *pvCurTSS16;
3771 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3772 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3773 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3774 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3775 if (rcStrict != VINF_SUCCESS)
3776 {
3777 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3778 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3779 return rcStrict;
3780 }
3781
3782 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
3783 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3784 pCurTSS16->ip = uNextEip;
3785 pCurTSS16->flags = u32EFlags;
3786 pCurTSS16->ax = pCtx->ax;
3787 pCurTSS16->cx = pCtx->cx;
3788 pCurTSS16->dx = pCtx->dx;
3789 pCurTSS16->bx = pCtx->bx;
3790 pCurTSS16->sp = pCtx->sp;
3791 pCurTSS16->bp = pCtx->bp;
3792 pCurTSS16->si = pCtx->si;
3793 pCurTSS16->di = pCtx->di;
3794 pCurTSS16->es = pCtx->es.Sel;
3795 pCurTSS16->cs = pCtx->cs.Sel;
3796 pCurTSS16->ss = pCtx->ss.Sel;
3797 pCurTSS16->ds = pCtx->ds.Sel;
3798
3799 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3800 if (rcStrict != VINF_SUCCESS)
3801 {
3802 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3803 VBOXSTRICTRC_VAL(rcStrict)));
3804 return rcStrict;
3805 }
3806 }
3807
3808 /*
3809 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3810 */
3811 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3812 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3813 {
3814 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3815 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3816 pNewTSS->selPrev = pCtx->tr.Sel;
3817 }
3818
3819 /*
3820 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3821 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3822 */
3823 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3824 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3825 bool fNewDebugTrap;
3826 if (fIsNewTSS386)
3827 {
3828 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3829 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3830 uNewEip = pNewTSS32->eip;
3831 uNewEflags = pNewTSS32->eflags;
3832 uNewEax = pNewTSS32->eax;
3833 uNewEcx = pNewTSS32->ecx;
3834 uNewEdx = pNewTSS32->edx;
3835 uNewEbx = pNewTSS32->ebx;
3836 uNewEsp = pNewTSS32->esp;
3837 uNewEbp = pNewTSS32->ebp;
3838 uNewEsi = pNewTSS32->esi;
3839 uNewEdi = pNewTSS32->edi;
3840 uNewES = pNewTSS32->es;
3841 uNewCS = pNewTSS32->cs;
3842 uNewSS = pNewTSS32->ss;
3843 uNewDS = pNewTSS32->ds;
3844 uNewFS = pNewTSS32->fs;
3845 uNewGS = pNewTSS32->gs;
3846 uNewLdt = pNewTSS32->selLdt;
3847 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3848 }
3849 else
3850 {
3851 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3852 uNewCr3 = 0;
3853 uNewEip = pNewTSS16->ip;
3854 uNewEflags = pNewTSS16->flags;
3855 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3856 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3857 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3858 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3859 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3860 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3861 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3862 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3863 uNewES = pNewTSS16->es;
3864 uNewCS = pNewTSS16->cs;
3865 uNewSS = pNewTSS16->ss;
3866 uNewDS = pNewTSS16->ds;
3867 uNewFS = 0;
3868 uNewGS = 0;
3869 uNewLdt = pNewTSS16->selLdt;
3870 fNewDebugTrap = false;
3871 }
3872
3873 if (GCPtrNewTSS == GCPtrCurTSS)
3874 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3875 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3876
3877 /*
3878 * We're done accessing the new TSS.
3879 */
3880 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3881 if (rcStrict != VINF_SUCCESS)
3882 {
3883 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3884 return rcStrict;
3885 }
3886
3887 /*
3888 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3889 */
3890 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3891 {
3892 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3893 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3894 if (rcStrict != VINF_SUCCESS)
3895 {
3896 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3897 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3898 return rcStrict;
3899 }
3900
3901 /* Check that the descriptor indicates the new TSS is available (not busy). */
3902 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3903 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3904 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3905
3906 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3907 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3908 if (rcStrict != VINF_SUCCESS)
3909 {
3910 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3911 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3912 return rcStrict;
3913 }
3914 }
3915
3916 /*
3917 * From this point on, we're technically in the new task.  Exceptions raised from here on
3918 * are delivered after the task switch completes, but before any instruction of the new task executes.
3919 */
3920 pCtx->tr.Sel = SelTSS;
3921 pCtx->tr.ValidSel = SelTSS;
3922 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3923 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3924 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3925 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3926 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3927
3928 /* Set the busy bit in TR. */
3929 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3930 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3931 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3932 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3933 {
3934 uNewEflags |= X86_EFL_NT;
3935 }
3936
3937 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3938 pCtx->cr0 |= X86_CR0_TS;
3939 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3940
3941 pCtx->eip = uNewEip;
3942 pCtx->eax = uNewEax;
3943 pCtx->ecx = uNewEcx;
3944 pCtx->edx = uNewEdx;
3945 pCtx->ebx = uNewEbx;
3946 pCtx->esp = uNewEsp;
3947 pCtx->ebp = uNewEbp;
3948 pCtx->esi = uNewEsi;
3949 pCtx->edi = uNewEdi;
3950
3951 uNewEflags &= X86_EFL_LIVE_MASK;
3952 uNewEflags |= X86_EFL_RA1_MASK;
3953 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3954
3955 /*
3956 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3957 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3958 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3959 */
3960 pCtx->es.Sel = uNewES;
3961 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3962
3963 pCtx->cs.Sel = uNewCS;
3964 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3965
3966 pCtx->ss.Sel = uNewSS;
3967 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3968
3969 pCtx->ds.Sel = uNewDS;
3970 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3971
3972 pCtx->fs.Sel = uNewFS;
3973 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3974
3975 pCtx->gs.Sel = uNewGS;
3976 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3977 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3978
3979 pCtx->ldtr.Sel = uNewLdt;
3980 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3981 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3982 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3983
3984 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3985 {
3986 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3987 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3988 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3989 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3990 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3991 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3992 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3993 }
3994
3995 /*
3996 * Switch CR3 for the new task.
3997 */
3998 if ( fIsNewTSS386
3999 && (pCtx->cr0 & X86_CR0_PG))
4000 {
4001 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4002 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4003 {
4004 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4005 AssertRCSuccessReturn(rc, rc);
4006 }
4007 else
4008 pCtx->cr3 = uNewCr3;
4009
4010 /* Inform PGM. */
4011 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4012 {
4013 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4014 AssertRCReturn(rc, rc);
4015 /* ignore informational status codes */
4016 }
4017 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4018 }
4019
4020 /*
4021 * Switch LDTR for the new task.
4022 */
4023 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4024 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4025 else
4026 {
4027 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4028
4029 IEMSELDESC DescNewLdt;
4030 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4031 if (rcStrict != VINF_SUCCESS)
4032 {
4033 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4034 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4035 return rcStrict;
4036 }
4037 if ( !DescNewLdt.Legacy.Gen.u1Present
4038 || DescNewLdt.Legacy.Gen.u1DescType
4039 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4040 {
4041 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4042 uNewLdt, DescNewLdt.Legacy.u));
4043 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4044 }
4045
4046 pCtx->ldtr.ValidSel = uNewLdt;
4047 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4048 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4049 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4050 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4051 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4052 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4053 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4054 }
4055
4056 IEMSELDESC DescSS;
4057 if (IEM_IS_V86_MODE(pVCpu))
4058 {
4059 pVCpu->iem.s.uCpl = 3;
4060 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4061 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4062 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4063 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4064 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4065 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4066
4067 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4068 DescSS.Legacy.u = 0;
4069 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4070 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4071 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4072 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4073 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4074 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4075 DescSS.Legacy.Gen.u2Dpl = 3;
4076 }
4077 else
4078 {
4079 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4080
4081 /*
4082 * Load the stack segment for the new task.
4083 */
4084 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4085 {
4086 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4087 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4088 }
4089
4090 /* Fetch the descriptor. */
4091 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4092 if (rcStrict != VINF_SUCCESS)
4093 {
4094 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4095 VBOXSTRICTRC_VAL(rcStrict)));
4096 return rcStrict;
4097 }
4098
4099 /* SS must be a data segment and writable. */
4100 if ( !DescSS.Legacy.Gen.u1DescType
4101 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4102 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4103 {
4104 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4105 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4106 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4107 }
4108
4109 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4110 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4111 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4112 {
4113 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4114 uNewCpl));
4115 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4116 }
4117
4118 /* Is it there? */
4119 if (!DescSS.Legacy.Gen.u1Present)
4120 {
4121 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4122 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4123 }
4124
4125 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4126 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4127
4128 /* Set the accessed bit before committing the result into SS. */
4129 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4130 {
4131 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4132 if (rcStrict != VINF_SUCCESS)
4133 return rcStrict;
4134 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4135 }
4136
4137 /* Commit SS. */
4138 pCtx->ss.Sel = uNewSS;
4139 pCtx->ss.ValidSel = uNewSS;
4140 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4141 pCtx->ss.u32Limit = cbLimit;
4142 pCtx->ss.u64Base = u64Base;
4143 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4144 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4145
4146 /* CPL has changed, update IEM before loading rest of segments. */
4147 pVCpu->iem.s.uCpl = uNewCpl;
4148
4149 /*
4150 * Load the data segments for the new task.
4151 */
4152 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4153 if (rcStrict != VINF_SUCCESS)
4154 return rcStrict;
4155 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4156 if (rcStrict != VINF_SUCCESS)
4157 return rcStrict;
4158 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4159 if (rcStrict != VINF_SUCCESS)
4160 return rcStrict;
4161 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4162 if (rcStrict != VINF_SUCCESS)
4163 return rcStrict;
4164
4165 /*
4166 * Load the code segment for the new task.
4167 */
4168 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4169 {
4170 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4171 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4172 }
4173
4174 /* Fetch the descriptor. */
4175 IEMSELDESC DescCS;
4176 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4177 if (rcStrict != VINF_SUCCESS)
4178 {
4179 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4180 return rcStrict;
4181 }
4182
4183 /* CS must be a code segment. */
4184 if ( !DescCS.Legacy.Gen.u1DescType
4185 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4186 {
4187 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4188 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4189 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4190 }
4191
4192 /* For conforming CS, DPL must be less than or equal to the RPL. */
4193 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4194 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4195 {
4196 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4197 DescCS.Legacy.Gen.u2Dpl));
4198 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4199 }
4200
4201 /* For non-conforming CS, DPL must match RPL. */
4202 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4203 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4204 {
4205 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4206 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4207 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4208 }
4209
4210 /* Is it there? */
4211 if (!DescCS.Legacy.Gen.u1Present)
4212 {
4213 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4214 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4215 }
4216
4217 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4218 u64Base = X86DESC_BASE(&DescCS.Legacy);
4219
4220 /* Set the accessed bit before committing the result into CS. */
4221 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4222 {
4223 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4224 if (rcStrict != VINF_SUCCESS)
4225 return rcStrict;
4226 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4227 }
4228
4229 /* Commit CS. */
4230 pCtx->cs.Sel = uNewCS;
4231 pCtx->cs.ValidSel = uNewCS;
4232 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4233 pCtx->cs.u32Limit = cbLimit;
4234 pCtx->cs.u64Base = u64Base;
4235 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4236 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4237 }
4238
4239 /** @todo Debug trap. */
4240 if (fIsNewTSS386 && fNewDebugTrap)
4241 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4242
4243 /*
4244 * Construct the error code masks based on what caused this task switch.
4245 * See Intel Instruction reference for INT.
4246 */
4247 uint16_t uExt;
4248 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4249 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4250 {
4251 uExt = 1;
4252 }
4253 else
4254 uExt = 0;
4255
4256 /*
4257 * Push any error code on to the new stack.
4258 */
4259 if (fFlags & IEM_XCPT_FLAGS_ERR)
4260 {
4261 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4262 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4263 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4264
4265 /* Check that there is sufficient space on the stack. */
4266 /** @todo Factor out segment limit checking for normal/expand down segments
4267 * into a separate function. */
4268 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4269 {
4270 if ( pCtx->esp - 1 > cbLimitSS
4271 || pCtx->esp < cbStackFrame)
4272 {
4273 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4274 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4275 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4276 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4277 }
4278 }
4279 else
4280 {
4281 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4282 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4283 {
4284 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4285 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4286 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4287 }
4288 }
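/* Sketch of the factoring hinted at by the @todo above (illustrative only, the
 * helper name is made up): both branches boil down to one "is there room for
 * cbFrame bytes below ESP" predicate that differs only in which side of the
 * limit is the valid one.
 * @code
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *      static bool iemIsStackPushWithinLimit(uint32_t uEsp, uint32_t cbFrame,
 *                                            uint32_t cbLimit, bool fExpandDown, bool fBig)
 *      {
 *          if (!fExpandDown)
 *              return uEsp - 1 <= cbLimit && uEsp >= cbFrame;         // normal: [cbFrame, cbLimit+1]
 *          uint32_t const uMax = fBig ? UINT32_MAX : UINT32_C(0xffff);
 *          return uEsp - 1 <= uMax && uEsp - cbFrame >= cbLimit + 1;  // expand down: (cbLimit, uMax+1]
 *      }
 * @endcode
 */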
4289
4290
4291 if (fIsNewTSS386)
4292 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4293 else
4294 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4295 if (rcStrict != VINF_SUCCESS)
4296 {
4297 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4298 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4299 return rcStrict;
4300 }
4301 }
4302
4303 /* Check the new EIP against the new CS limit. */
4304 if (pCtx->eip > pCtx->cs.u32Limit)
4305 {
4306 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4307 pCtx->eip, pCtx->cs.u32Limit));
4308 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4309 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4310 }
4311
4312 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4313 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4314}
4315
4316
4317/**
4318 * Implements exceptions and interrupts for protected mode.
4319 *
4320 * @returns VBox strict status code.
4321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4322 * @param pCtx The CPU context.
4323 * @param cbInstr The number of bytes to offset rIP by in the return
4324 * address.
4325 * @param u8Vector The interrupt / exception vector number.
4326 * @param fFlags The flags.
4327 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4328 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4329 */
4330IEM_STATIC VBOXSTRICTRC
4331iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4332 PCPUMCTX pCtx,
4333 uint8_t cbInstr,
4334 uint8_t u8Vector,
4335 uint32_t fFlags,
4336 uint16_t uErr,
4337 uint64_t uCr2)
4338{
4339 /*
4340 * Read the IDT entry.
4341 */
4342 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4343 {
4344 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4345 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4346 }
4347 X86DESC Idte;
4348 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4349 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4350 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4351 return rcStrict;
4352 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4353 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4354 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4355
4356 /*
4357 * Check the descriptor type, DPL and such.
4358 * ASSUMES this is done in the same order as described for call-gate calls.
4359 */
4360 if (Idte.Gate.u1DescType)
4361 {
4362 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4363 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4364 }
4365 bool fTaskGate = false;
4366 uint8_t f32BitGate = true;
4367 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4368 switch (Idte.Gate.u4Type)
4369 {
4370 case X86_SEL_TYPE_SYS_UNDEFINED:
4371 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4372 case X86_SEL_TYPE_SYS_LDT:
4373 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4374 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4375 case X86_SEL_TYPE_SYS_UNDEFINED2:
4376 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4377 case X86_SEL_TYPE_SYS_UNDEFINED3:
4378 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4379 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4380 case X86_SEL_TYPE_SYS_UNDEFINED4:
4381 {
4382 /** @todo check what actually happens when the type is wrong...
4383 * esp. call gates. */
4384 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4385 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4386 }
4387
4388 case X86_SEL_TYPE_SYS_286_INT_GATE:
4389 f32BitGate = false; /* fall thru */
4390 case X86_SEL_TYPE_SYS_386_INT_GATE:
4391 fEflToClear |= X86_EFL_IF;
4392 break;
4393
4394 case X86_SEL_TYPE_SYS_TASK_GATE:
4395 fTaskGate = true;
4396#ifndef IEM_IMPLEMENTS_TASKSWITCH
4397 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4398#endif
4399 break;
4400
4401 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4402 f32BitGate = false; /* fall thru */
4403 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4404 break;
4405
4406 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4407 }
4408
4409 /* Check DPL against CPL if applicable. */
4410 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4411 {
4412 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4413 {
4414 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4415 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4416 }
4417 }
4418
4419 /* Is it there? */
4420 if (!Idte.Gate.u1Present)
4421 {
4422 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4423 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4424 }
4425
4426 /* Is it a task-gate? */
4427 if (fTaskGate)
4428 {
4429 /*
4430 * Construct the error code masks based on what caused this task switch.
4431 * See Intel Instruction reference for INT.
4432 */
4433 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4434 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4435 RTSEL SelTSS = Idte.Gate.u16Sel;
4436
4437 /*
4438 * Fetch the TSS descriptor in the GDT.
4439 */
4440 IEMSELDESC DescTSS;
4441 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4442 if (rcStrict != VINF_SUCCESS)
4443 {
4444 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4445 VBOXSTRICTRC_VAL(rcStrict)));
4446 return rcStrict;
4447 }
4448
4449 /* The TSS descriptor must be a system segment and be available (not busy). */
4450 if ( DescTSS.Legacy.Gen.u1DescType
4451 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4452 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4453 {
4454 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4455 u8Vector, SelTSS, DescTSS.Legacy.au64));
4456 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4457 }
4458
4459 /* The TSS must be present. */
4460 if (!DescTSS.Legacy.Gen.u1Present)
4461 {
4462 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4463 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4464 }
4465
4466 /* Do the actual task switch. */
4467 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4468 }
4469
4470 /* A null CS is bad. */
4471 RTSEL NewCS = Idte.Gate.u16Sel;
4472 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4473 {
4474 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4475 return iemRaiseGeneralProtectionFault0(pVCpu);
4476 }
4477
4478 /* Fetch the descriptor for the new CS. */
4479 IEMSELDESC DescCS;
4480 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4481 if (rcStrict != VINF_SUCCESS)
4482 {
4483 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4484 return rcStrict;
4485 }
4486
4487 /* Must be a code segment. */
4488 if (!DescCS.Legacy.Gen.u1DescType)
4489 {
4490 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4491 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4492 }
4493 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4494 {
4495 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4496 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4497 }
4498
4499 /* Don't allow lowering the privilege level. */
4500 /** @todo Does the lowering of privileges apply to software interrupts
4501 * only? This has bearings on the more-privileged or
4502 * same-privilege stack behavior further down. A testcase would
4503 * be nice. */
4504 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4505 {
4506 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4507 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4508 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4509 }
4510
4511 /* Make sure the selector is present. */
4512 if (!DescCS.Legacy.Gen.u1Present)
4513 {
4514 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4515 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4516 }
4517
4518 /* Check the new EIP against the new CS limit. */
4519 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4520 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4521 ? Idte.Gate.u16OffsetLow
4522 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4523 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4524 if (uNewEip > cbLimitCS)
4525 {
4526 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4527 u8Vector, uNewEip, cbLimitCS, NewCS));
4528 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4529 }
4530 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4531
4532 /* Calc the flag image to push. */
4533 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4534 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4535 fEfl &= ~X86_EFL_RF;
4536 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4537 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4538
4539 /* From V8086 mode only go to CPL 0. */
4540 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4541 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4542 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4543 {
4544 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4545 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4546 }
4547
4548 /*
4549 * If the privilege level changes, we need to get a new stack from the TSS.
4550 * This in turns means validating the new SS and ESP...
4551 */
4552 if (uNewCpl != pVCpu->iem.s.uCpl)
4553 {
4554 RTSEL NewSS;
4555 uint32_t uNewEsp;
4556 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4557 if (rcStrict != VINF_SUCCESS)
4558 return rcStrict;
4559
4560 IEMSELDESC DescSS;
4561 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4562 if (rcStrict != VINF_SUCCESS)
4563 return rcStrict;
4564 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4565
4566 /* Check that there is sufficient space for the stack frame. */
4567 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4568 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4569 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4570 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4571
4572 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4573 {
4574 if ( uNewEsp - 1 > cbLimitSS
4575 || uNewEsp < cbStackFrame)
4576 {
4577 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4578 u8Vector, NewSS, uNewEsp, cbStackFrame));
4579 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4580 }
4581 }
4582 else
4583 {
4584 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4585 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4586 {
4587 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4588 u8Vector, NewSS, uNewEsp, cbStackFrame));
4589 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4590 }
4591 }
4592
4593 /*
4594 * Start making changes.
4595 */
4596
4597 /* Set the new CPL so that stack accesses use it. */
4598 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4599 pVCpu->iem.s.uCpl = uNewCpl;
4600
4601 /* Create the stack frame. */
4602 RTPTRUNION uStackFrame;
4603 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4604 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4605 if (rcStrict != VINF_SUCCESS)
4606 return rcStrict;
4607 void * const pvStackFrame = uStackFrame.pv;
4608 if (f32BitGate)
4609 {
4610 if (fFlags & IEM_XCPT_FLAGS_ERR)
4611 *uStackFrame.pu32++ = uErr;
4612 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4613 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4614 uStackFrame.pu32[2] = fEfl;
4615 uStackFrame.pu32[3] = pCtx->esp;
4616 uStackFrame.pu32[4] = pCtx->ss.Sel;
4617 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4618 if (fEfl & X86_EFL_VM)
4619 {
4620 uStackFrame.pu32[1] = pCtx->cs.Sel;
4621 uStackFrame.pu32[5] = pCtx->es.Sel;
4622 uStackFrame.pu32[6] = pCtx->ds.Sel;
4623 uStackFrame.pu32[7] = pCtx->fs.Sel;
4624 uStackFrame.pu32[8] = pCtx->gs.Sel;
4625 }
4626 }
4627 else
4628 {
4629 if (fFlags & IEM_XCPT_FLAGS_ERR)
4630 *uStackFrame.pu16++ = uErr;
4631 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4632 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4633 uStackFrame.pu16[2] = fEfl;
4634 uStackFrame.pu16[3] = pCtx->sp;
4635 uStackFrame.pu16[4] = pCtx->ss.Sel;
4636 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4637 if (fEfl & X86_EFL_VM)
4638 {
4639 uStackFrame.pu16[1] = pCtx->cs.Sel;
4640 uStackFrame.pu16[5] = pCtx->es.Sel;
4641 uStackFrame.pu16[6] = pCtx->ds.Sel;
4642 uStackFrame.pu16[7] = pCtx->fs.Sel;
4643 uStackFrame.pu16[8] = pCtx->gs.Sel;
4644 }
4645 }
4646 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4647 if (rcStrict != VINF_SUCCESS)
4648 return rcStrict;
4649
4650 /* Mark the selectors 'accessed' (hope this is the correct time). */
4651 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4652 * after pushing the stack frame? (Write protect the gdt + stack to
4653 * find out.) */
4654 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4655 {
4656 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4657 if (rcStrict != VINF_SUCCESS)
4658 return rcStrict;
4659 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4660 }
4661
4662 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4663 {
4664 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4665 if (rcStrict != VINF_SUCCESS)
4666 return rcStrict;
4667 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4668 }
4669
4670 /*
4671 * Start committing the register changes (joins with the DPL=CPL branch).
4672 */
4673 pCtx->ss.Sel = NewSS;
4674 pCtx->ss.ValidSel = NewSS;
4675 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4676 pCtx->ss.u32Limit = cbLimitSS;
4677 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4678 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4679 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4680 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4681 * SP is loaded).
4682 * Need to check the other combinations too:
4683 * - 16-bit TSS, 32-bit handler
4684 * - 32-bit TSS, 16-bit handler */
4685 if (!pCtx->ss.Attr.n.u1DefBig)
4686 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4687 else
4688 pCtx->rsp = uNewEsp - cbStackFrame;
4689
4690 if (fEfl & X86_EFL_VM)
4691 {
4692 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4693 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4694 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4695 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4696 }
4697 }
4698 /*
4699 * Same privilege, no stack change and smaller stack frame.
4700 */
4701 else
4702 {
4703 uint64_t uNewRsp;
4704 RTPTRUNION uStackFrame;
4705 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
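        /* Same-CPL frame: only [error code,] EIP/IP, CS and EFLAGS are saved
           (see the pushes below); SS:ESP is not pushed since the stack does
           not change. */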
4706 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4707 if (rcStrict != VINF_SUCCESS)
4708 return rcStrict;
4709 void * const pvStackFrame = uStackFrame.pv;
4710
4711 if (f32BitGate)
4712 {
4713 if (fFlags & IEM_XCPT_FLAGS_ERR)
4714 *uStackFrame.pu32++ = uErr;
4715 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4716 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4717 uStackFrame.pu32[2] = fEfl;
4718 }
4719 else
4720 {
4721 if (fFlags & IEM_XCPT_FLAGS_ERR)
4722 *uStackFrame.pu16++ = uErr;
4723 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4724 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4725 uStackFrame.pu16[2] = fEfl;
4726 }
4727 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4728 if (rcStrict != VINF_SUCCESS)
4729 return rcStrict;
4730
4731 /* Mark the CS selector as 'accessed'. */
4732 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4733 {
4734 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4735 if (rcStrict != VINF_SUCCESS)
4736 return rcStrict;
4737 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4738 }
4739
4740 /*
4741 * Start committing the register changes (joins with the other branch).
4742 */
4743 pCtx->rsp = uNewRsp;
4744 }
4745
4746 /* ... register committing continues. */
4747 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4748 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4749 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4750 pCtx->cs.u32Limit = cbLimitCS;
4751 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4752 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4753
4754 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4755 fEfl &= ~fEflToClear;
4756 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4757
4758 if (fFlags & IEM_XCPT_FLAGS_CR2)
4759 pCtx->cr2 = uCr2;
4760
4761 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4762 iemRaiseXcptAdjustState(pCtx, u8Vector);
4763
4764 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4765}
4766
4767
4768/**
4769 * Implements exceptions and interrupts for long mode.
4770 *
4771 * @returns VBox strict status code.
4772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4773 * @param pCtx The CPU context.
4774 * @param cbInstr The number of bytes to offset rIP by in the return
4775 * address.
4776 * @param u8Vector The interrupt / exception vector number.
4777 * @param fFlags The flags.
4778 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4779 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4780 */
4781IEM_STATIC VBOXSTRICTRC
4782iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4783 PCPUMCTX pCtx,
4784 uint8_t cbInstr,
4785 uint8_t u8Vector,
4786 uint32_t fFlags,
4787 uint16_t uErr,
4788 uint64_t uCr2)
4789{
4790 /*
4791 * Read the IDT entry.
4792 */
4793 uint16_t offIdt = (uint16_t)u8Vector << 4;
4794 if (pCtx->idtr.cbIdt < offIdt + 7)
4795 {
4796 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4797 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4798 }
4799 X86DESC64 Idte;
4800 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4801 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4802 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4803 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4804 return rcStrict;
4805 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4806 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4807 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4808
4809 /*
4810 * Check the descriptor type, DPL and such.
4811 * ASSUMES this is done in the same order as described for call-gate calls.
4812 */
4813 if (Idte.Gate.u1DescType)
4814 {
4815 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4816 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4817 }
4818 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4819 switch (Idte.Gate.u4Type)
4820 {
4821 case AMD64_SEL_TYPE_SYS_INT_GATE:
4822 fEflToClear |= X86_EFL_IF;
4823 break;
4824 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4825 break;
4826
4827 default:
4828 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4829 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4830 }
4831
4832 /* Check DPL against CPL if applicable. */
4833 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4834 {
4835 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4836 {
4837 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4838 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4839 }
4840 }
4841
4842 /* Is it there? */
4843 if (!Idte.Gate.u1Present)
4844 {
4845 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4846 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4847 }
4848
4849 /* A null CS is bad. */
4850 RTSEL NewCS = Idte.Gate.u16Sel;
4851 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4852 {
4853 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4854 return iemRaiseGeneralProtectionFault0(pVCpu);
4855 }
4856
4857 /* Fetch the descriptor for the new CS. */
4858 IEMSELDESC DescCS;
4859 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4860 if (rcStrict != VINF_SUCCESS)
4861 {
4862 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4863 return rcStrict;
4864 }
4865
4866 /* Must be a 64-bit code segment. */
4867 if (!DescCS.Long.Gen.u1DescType)
4868 {
4869 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4870 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4871 }
4872 if ( !DescCS.Long.Gen.u1Long
4873 || DescCS.Long.Gen.u1DefBig
4874 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4875 {
4876 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4877 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4878 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4879 }
4880
4881 /* Don't allow lowering the privilege level. For non-conforming CS
4882 selectors, the CS.DPL sets the privilege level the trap/interrupt
4883 handler runs at. For conforming CS selectors, the CPL remains
4884 unchanged, but the CS.DPL must be <= CPL. */
4885 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4886 * when CPU in Ring-0. Result \#GP? */
4887 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4888 {
4889 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4890 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4891 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4892 }
4893
4894
4895 /* Make sure the selector is present. */
4896 if (!DescCS.Legacy.Gen.u1Present)
4897 {
4898 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4899 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4900 }
4901
4902 /* Check that the new RIP is canonical. */
4903 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4904 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4905 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4906 if (!IEM_IS_CANONICAL(uNewRip))
4907 {
4908 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4909 return iemRaiseGeneralProtectionFault0(pVCpu);
4910 }
4911
4912 /*
4913 * If the privilege level changes or if the IST isn't zero, we need to get
4914 * a new stack from the TSS.
4915 */
4916 uint64_t uNewRsp;
4917 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4918 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4919 if ( uNewCpl != pVCpu->iem.s.uCpl
4920 || Idte.Gate.u3IST != 0)
4921 {
4922 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4923 if (rcStrict != VINF_SUCCESS)
4924 return rcStrict;
4925 }
4926 else
4927 uNewRsp = pCtx->rsp;
4928 uNewRsp &= ~(uint64_t)0xf;
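    /* Note: the masking above mirrors what real CPUs do in long mode, where
       RSP is aligned down to a 16-byte boundary before the interrupt stack
       frame is pushed. */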
4929
4930 /*
4931 * Calc the flag image to push.
4932 */
4933 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4934 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4935 fEfl &= ~X86_EFL_RF;
4936 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4937 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4938
4939 /*
4940 * Start making changes.
4941 */
4942 /* Set the new CPL so that stack accesses use it. */
4943 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4944 pVCpu->iem.s.uCpl = uNewCpl;
4945
4946 /* Create the stack frame. */
4947 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4948 RTPTRUNION uStackFrame;
4949 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4950 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4951 if (rcStrict != VINF_SUCCESS)
4952 return rcStrict;
4953 void * const pvStackFrame = uStackFrame.pv;
4954
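    /* Frame layout being built below, lowest address first: [error code,]
       RIP, CS, RFLAGS, RSP, SS. Unlike the legacy same-CPL path, long mode
       always saves SS:RSP. */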
4955 if (fFlags & IEM_XCPT_FLAGS_ERR)
4956 *uStackFrame.pu64++ = uErr;
4957 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4958 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4959 uStackFrame.pu64[2] = fEfl;
4960 uStackFrame.pu64[3] = pCtx->rsp;
4961 uStackFrame.pu64[4] = pCtx->ss.Sel;
4962 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4963 if (rcStrict != VINF_SUCCESS)
4964 return rcStrict;
4965
4966 /* Mark the CS selectors 'accessed' (hope this is the correct time). */
4967 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4968 * after pushing the stack frame? (Write protect the gdt + stack to
4969 * find out.) */
4970 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4971 {
4972 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4973 if (rcStrict != VINF_SUCCESS)
4974 return rcStrict;
4975 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4976 }
4977
4978 /*
4979 * Start committing the register changes.
4980 */
4981 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4982 * hidden registers when interrupting 32-bit or 16-bit code! */
4983 if (uNewCpl != uOldCpl)
4984 {
4985 pCtx->ss.Sel = 0 | uNewCpl;
4986 pCtx->ss.ValidSel = 0 | uNewCpl;
4987 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4988 pCtx->ss.u32Limit = UINT32_MAX;
4989 pCtx->ss.u64Base = 0;
4990 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4991 }
4992 pCtx->rsp = uNewRsp - cbStackFrame;
4993 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4994 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4995 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4996 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4997 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4998 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4999 pCtx->rip = uNewRip;
5000
5001 fEfl &= ~fEflToClear;
5002 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5003
5004 if (fFlags & IEM_XCPT_FLAGS_CR2)
5005 pCtx->cr2 = uCr2;
5006
5007 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5008 iemRaiseXcptAdjustState(pCtx, u8Vector);
5009
5010 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5011}
5012
5013
5014/**
5015 * Implements exceptions and interrupts.
5016 *
5017 * All exceptions and interrupts go through this function!
5018 *
5019 * @returns VBox strict status code.
5020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5021 * @param cbInstr The number of bytes to offset rIP by in the return
5022 * address.
5023 * @param u8Vector The interrupt / exception vector number.
5024 * @param fFlags The flags.
5025 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5026 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5027 */
5028DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5029iemRaiseXcptOrInt(PVMCPU pVCpu,
5030 uint8_t cbInstr,
5031 uint8_t u8Vector,
5032 uint32_t fFlags,
5033 uint16_t uErr,
5034 uint64_t uCr2)
5035{
5036 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5037#ifdef IN_RING0
5038 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5039 AssertRCReturn(rc, rc);
5040#endif
5041
5042#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5043 /*
5044 * Flush prefetch buffer
5045 */
5046 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5047#endif
5048
5049 /*
5050 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5051 */
5052 if ( pCtx->eflags.Bits.u1VM
5053 && pCtx->eflags.Bits.u2IOPL != 3
5054 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5055 && (pCtx->cr0 & X86_CR0_PE) )
5056 {
5057 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5058 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5059 u8Vector = X86_XCPT_GP;
5060 uErr = 0;
5061 }
5062#ifdef DBGFTRACE_ENABLED
5063 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5064 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5065 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5066#endif
5067
5068 /*
5069 * Do recursion accounting.
5070 */
5071 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5072 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5073 if (pVCpu->iem.s.cXcptRecursions == 0)
5074 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5075 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5076 else
5077 {
5078 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5079 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5080
5081 /** @todo double and triple faults. */
5082 if (pVCpu->iem.s.cXcptRecursions >= 3)
5083 {
5084#ifdef DEBUG_bird
5085 AssertFailed();
5086#endif
5087 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5088 }
5089
5090 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5091 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5092 {
5093 ....
5094 } */
5095 }
5096 pVCpu->iem.s.cXcptRecursions++;
5097 pVCpu->iem.s.uCurXcpt = u8Vector;
5098 pVCpu->iem.s.fCurXcpt = fFlags;
5099
5100 /*
5101 * Extensive logging.
5102 */
5103#if defined(LOG_ENABLED) && defined(IN_RING3)
5104 if (LogIs3Enabled())
5105 {
5106 PVM pVM = pVCpu->CTX_SUFF(pVM);
5107 char szRegs[4096];
5108 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5109 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5110 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5111 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5112 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5113 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5114 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5115 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5116 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5117 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5118 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5119 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5120 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5121 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5122 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5123 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5124 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5125 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5126 " efer=%016VR{efer}\n"
5127 " pat=%016VR{pat}\n"
5128 " sf_mask=%016VR{sf_mask}\n"
5129 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5130 " lstar=%016VR{lstar}\n"
5131 " star=%016VR{star} cstar=%016VR{cstar}\n"
5132 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5133 );
5134
5135 char szInstr[256];
5136 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5137 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5138 szInstr, sizeof(szInstr), NULL);
5139 Log3(("%s%s\n", szRegs, szInstr));
5140 }
5141#endif /* LOG_ENABLED */
5142
5143 /*
5144 * Call the mode specific worker function.
5145 */
5146 VBOXSTRICTRC rcStrict;
5147 if (!(pCtx->cr0 & X86_CR0_PE))
5148 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5149 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5150 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5151 else
5152 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5153
5154 /* Flush the prefetch buffer. */
5155#ifdef IEM_WITH_CODE_TLB
5156 pVCpu->iem.s.pbInstrBuf = NULL;
5157#else
5158 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5159#endif
5160
5161 /*
5162 * Unwind.
5163 */
5164 pVCpu->iem.s.cXcptRecursions--;
5165 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5166 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5167 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5168 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5169 return rcStrict;
5170}
5171
5172#ifdef IEM_WITH_SETJMP
5173/**
5174 * See iemRaiseXcptOrInt. Will not return.
5175 */
5176IEM_STATIC DECL_NO_RETURN(void)
5177iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5178 uint8_t cbInstr,
5179 uint8_t u8Vector,
5180 uint32_t fFlags,
5181 uint16_t uErr,
5182 uint64_t uCr2)
5183{
5184 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5185 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5186}
5187#endif
5188
5189
5190/** \#DE - 00. */
5191DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5192{
5193 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5194}
5195
5196
5197/** \#DB - 01.
5198 * @note This automatically clears DR7.GD. */
5199DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5200{
5201 /** @todo set/clear RF. */
5202 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5204}
5205
5206
5207/** \#UD - 06. */
5208DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5209{
5210 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5211}
5212
5213
5214/** \#NM - 07. */
5215DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5216{
5217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5218}
5219
5220
5221/** \#TS(err) - 0a. */
5222DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5223{
5224 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5225}
5226
5227
5228/** \#TS(tr) - 0a. */
5229DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5230{
5231 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5232 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5233}
5234
5235
5236/** \#TS(0) - 0a. */
5237DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5238{
5239 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5240 0, 0);
5241}
5242
5243
5244/** \#TS(err) - 0a. */
5245DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5246{
5247 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5248 uSel & X86_SEL_MASK_OFF_RPL, 0);
5249}
5250
5251
5252/** \#NP(err) - 0b. */
5253DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5254{
5255 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5256}
5257
5258
5259/** \#NP(seg) - 0b. */
5260DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5261{
5262 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5263 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5264}
5265
5266
5267/** \#NP(sel) - 0b. */
5268DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5269{
5270 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5271 uSel & ~X86_SEL_RPL, 0);
5272}
5273
5274
5275/** \#SS(sel) - 0c. */
5276DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5277{
5278 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5279 uSel & ~X86_SEL_RPL, 0);
5280}
5281
5282
5283/** \#SS(err) - 0c. */
5284DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5285{
5286 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5287}
5288
5289
5290/** \#GP(n) - 0d. */
5291DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5292{
5293 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5294}
5295
5296
5297/** \#GP(0) - 0d. */
5298DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5299{
5300 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5301}
5302
5303#ifdef IEM_WITH_SETJMP
5304/** \#GP(0) - 0d. */
5305DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5306{
5307 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5308}
5309#endif
5310
5311
5312/** \#GP(sel) - 0d. */
5313DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5314{
5315 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5316 Sel & ~X86_SEL_RPL, 0);
5317}
5318
5319
5320/** \#GP(0) - 0d. */
5321DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5322{
5323 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5324}
5325
5326
5327/** \#GP(sel) - 0d. */
5328DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5329{
5330 NOREF(iSegReg); NOREF(fAccess);
5331 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5332 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5333}
5334
5335#ifdef IEM_WITH_SETJMP
5336/** \#GP(sel) - 0d, longjmp. */
5337DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5338{
5339 NOREF(iSegReg); NOREF(fAccess);
5340 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5341 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5342}
5343#endif
5344
5345/** \#GP(sel) - 0d. */
5346DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5347{
5348 NOREF(Sel);
5349 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5350}
5351
5352#ifdef IEM_WITH_SETJMP
5353/** \#GP(sel) - 0d, longjmp. */
5354DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5355{
5356 NOREF(Sel);
5357 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5358}
5359#endif
5360
5361
5362/** \#GP(sel) - 0d. */
5363DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5364{
5365 NOREF(iSegReg); NOREF(fAccess);
5366 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5367}
5368
5369#ifdef IEM_WITH_SETJMP
5370/** \#GP(sel) - 0d, longjmp. */
5371DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5372 uint32_t fAccess)
5373{
5374 NOREF(iSegReg); NOREF(fAccess);
5375 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5376}
5377#endif
5378
5379
5380/** \#PF(n) - 0e. */
5381DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5382{
5383 uint16_t uErr;
5384 switch (rc)
5385 {
5386 case VERR_PAGE_NOT_PRESENT:
5387 case VERR_PAGE_TABLE_NOT_PRESENT:
5388 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5389 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5390 uErr = 0;
5391 break;
5392
5393 default:
5394 AssertMsgFailed(("%Rrc\n", rc));
5395 case VERR_ACCESS_DENIED:
5396 uErr = X86_TRAP_PF_P;
5397 break;
5398
5399 /** @todo reserved */
5400 }
5401
5402 if (pVCpu->iem.s.uCpl == 3)
5403 uErr |= X86_TRAP_PF_US;
5404
5405 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5406 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5407 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5408 uErr |= X86_TRAP_PF_ID;
5409
5410#if 0 /* This is so much non-sense, really. Why was it done like that? */
5411 /* Note! RW access callers reporting a WRITE protection fault, will clear
5412 the READ flag before calling. So, read-modify-write accesses (RW)
5413 can safely be reported as READ faults. */
5414 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5415 uErr |= X86_TRAP_PF_RW;
5416#else
5417 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5418 {
5419 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5420 uErr |= X86_TRAP_PF_RW;
5421 }
5422#endif
5423
5424 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5425 uErr, GCPtrWhere);
5426}
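/* Rough summary of the #PF error code assembled above: P (bit 0) when the page
   was present (i.e. an access rights problem), W/R (bit 1) for write accesses,
   U/S (bit 2) when executing at CPL 3, and I/D (bit 4) for instruction fetches
   with PAE+NX enabled. Reserved-bit faults are not reported yet (see the @todo
   above). */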
5427
5428#ifdef IEM_WITH_SETJMP
5429/** \#PF(n) - 0e, longjmp. */
5430IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5431{
5432 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5433}
5434#endif
5435
5436
5437/** \#MF(0) - 10. */
5438DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5439{
5440 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5441}
5442
5443
5444/** \#AC(0) - 11. */
5445DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5446{
5447 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5448}
5449
5450
5451/**
5452 * Macro for calling iemCImplRaiseDivideError().
5453 *
5454 * This enables us to add/remove arguments and force different levels of
5455 * inlining as we wish.
5456 *
5457 * @return Strict VBox status code.
5458 */
5459#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5460IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5461{
5462 NOREF(cbInstr);
5463 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5464}
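/* Usage sketch (illustrative, not taken verbatim from any decoder): an opcode
   function that needs to raise this exception simply does
   'return IEMOP_RAISE_DIVIDE_ERROR();', which defers the actual raising to the
   C implementation above via IEM_MC_DEFER_TO_CIMPL_0. */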
5465
5466
5467/**
5468 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5469 *
5470 * This enables us to add/remove arguments and force different levels of
5471 * inlining as we wish.
5472 *
5473 * @return Strict VBox status code.
5474 */
5475#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5476IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5477{
5478 NOREF(cbInstr);
5479 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5480}
5481
5482
5483/**
5484 * Macro for calling iemCImplRaiseInvalidOpcode().
5485 *
5486 * This enables us to add/remove arguments and force different levels of
5487 * inlining as we wish.
5488 *
5489 * @return Strict VBox status code.
5490 */
5491#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5492IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5493{
5494 NOREF(cbInstr);
5495 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5496}
5497
5498
5499/** @} */
5500
5501
5502/*
5503 *
5504 * Helper routines.
5505 * Helper routines.
5506 * Helper routines.
5507 *
5508 */
5509
5510/**
5511 * Recalculates the effective operand size.
5512 *
5513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5514 */
5515IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5516{
5517 switch (pVCpu->iem.s.enmCpuMode)
5518 {
5519 case IEMMODE_16BIT:
5520 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5521 break;
5522 case IEMMODE_32BIT:
5523 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5524 break;
5525 case IEMMODE_64BIT:
5526 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5527 {
5528 case 0:
5529 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5530 break;
5531 case IEM_OP_PRF_SIZE_OP:
5532 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5533 break;
5534 case IEM_OP_PRF_SIZE_REX_W:
5535 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5536 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5537 break;
5538 }
5539 break;
5540 default:
5541 AssertFailed();
5542 }
5543}
5544
5545
5546/**
5547 * Sets the default operand size to 64-bit and recalculates the effective
5548 * operand size.
5549 *
5550 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5551 */
5552IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5553{
5554 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5555 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5556 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5557 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5558 else
5559 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5560}
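/* Quick reference for the two helpers above when in 64-bit mode (summary, not
 * normative):
 *      prefixes            default op size 32      default op size 64
 *      (none)              32-bit                  64-bit
 *      0x66                16-bit                  16-bit
 *      REX.W               64-bit                  64-bit
 *      REX.W + 0x66        64-bit                  64-bit
 * I.e. REX.W always wins over the operand size prefix, and a 64-bit default
 * can only be overridden down to 16-bit.
 */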
5561
5562
5563/*
5564 *
5565 * Common opcode decoders.
5566 * Common opcode decoders.
5567 * Common opcode decoders.
5568 *
5569 */
5570//#include <iprt/mem.h>
5571
5572/**
5573 * Used to add extra details about a stub case.
5574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5575 */
5576IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5577{
5578#if defined(LOG_ENABLED) && defined(IN_RING3)
5579 PVM pVM = pVCpu->CTX_SUFF(pVM);
5580 char szRegs[4096];
5581 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5582 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5583 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5584 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5585 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5586 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5587 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5588 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5589 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5590 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5591 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5592 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5593 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5594 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5595 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5596 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5597 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5598 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5599 " efer=%016VR{efer}\n"
5600 " pat=%016VR{pat}\n"
5601 " sf_mask=%016VR{sf_mask}\n"
5602 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5603 " lstar=%016VR{lstar}\n"
5604 " star=%016VR{star} cstar=%016VR{cstar}\n"
5605 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5606 );
5607
5608 char szInstr[256];
5609 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5610 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5611 szInstr, sizeof(szInstr), NULL);
5612
5613 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5614#else
5615 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5616#endif
5617}
5618
5619/**
5620 * Complains about a stub.
5621 *
5622 * Providing two versions of this macro, one for daily use and one for use when
5623 * working on IEM.
5624 */
5625#if 0
5626# define IEMOP_BITCH_ABOUT_STUB() \
5627 do { \
5628 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5629 iemOpStubMsg2(pVCpu); \
5630 RTAssertPanic(); \
5631 } while (0)
5632#else
5633# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5634#endif
5635
5636/** Stubs an opcode. */
5637#define FNIEMOP_STUB(a_Name) \
5638 FNIEMOP_DEF(a_Name) \
5639 { \
5640 RT_NOREF_PV(pVCpu); \
5641 IEMOP_BITCH_ABOUT_STUB(); \
5642 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5643 } \
5644 typedef int ignore_semicolon
5645
5646/** Stubs an opcode. */
5647#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5648 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5649 { \
5650 RT_NOREF_PV(pVCpu); \
5651 RT_NOREF_PV(a_Name0); \
5652 IEMOP_BITCH_ABOUT_STUB(); \
5653 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5654 } \
5655 typedef int ignore_semicolon
5656
5657/** Stubs an opcode which currently should raise \#UD. */
5658#define FNIEMOP_UD_STUB(a_Name) \
5659 FNIEMOP_DEF(a_Name) \
5660 { \
5661 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5662 return IEMOP_RAISE_INVALID_OPCODE(); \
5663 } \
5664 typedef int ignore_semicolon
5665
5666/** Stubs an opcode which currently should raise \#UD. */
5667#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5668 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5669 { \
5670 RT_NOREF_PV(pVCpu); \
5671 RT_NOREF_PV(a_Name0); \
5672 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5673 return IEMOP_RAISE_INVALID_OPCODE(); \
5674 } \
5675 typedef int ignore_semicolon
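/* The trailing 'typedef int ignore_semicolon' in the stub macros above is there
   so that each macro invocation can be terminated with a semicolon and still
   form a complete declaration (avoiding stray-semicolon complaints at file
   scope). */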
5676
5677
5678
5679/** @name Register Access.
5680 * @{
5681 */
5682
5683/**
5684 * Gets a reference (pointer) to the specified hidden segment register.
5685 *
5686 * @returns Hidden register reference.
5687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5688 * @param iSegReg The segment register.
5689 */
5690IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5691{
5692 Assert(iSegReg < X86_SREG_COUNT);
5693 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5694 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5695
5696#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5697 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5698 { /* likely */ }
5699 else
5700 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5701#else
5702 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5703#endif
5704 return pSReg;
5705}
5706
5707
5708/**
5709 * Ensures that the given hidden segment register is up to date.
5710 *
5711 * @returns Hidden register reference.
5712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5713 * @param pSReg The segment register.
5714 */
5715IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5716{
5717#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5718 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5719 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5720#else
5721 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5722 NOREF(pVCpu);
5723#endif
5724 return pSReg;
5725}
5726
5727
5728/**
5729 * Gets a reference (pointer) to the specified segment register (the selector
5730 * value).
5731 *
5732 * @returns Pointer to the selector variable.
5733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5734 * @param iSegReg The segment register.
5735 */
5736DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5737{
5738 Assert(iSegReg < X86_SREG_COUNT);
5739 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5740 return &pCtx->aSRegs[iSegReg].Sel;
5741}
5742
5743
5744/**
5745 * Fetches the selector value of a segment register.
5746 *
5747 * @returns The selector value.
5748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5749 * @param iSegReg The segment register.
5750 */
5751DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5752{
5753 Assert(iSegReg < X86_SREG_COUNT);
5754 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5755}
5756
5757
5758/**
5759 * Gets a reference (pointer) to the specified general purpose register.
5760 *
5761 * @returns Register reference.
5762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5763 * @param iReg The general purpose register.
5764 */
5765DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5766{
5767 Assert(iReg < 16);
5768 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5769 return &pCtx->aGRegs[iReg];
5770}
5771
5772
5773/**
5774 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5775 *
5776 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5777 *
5778 * @returns Register reference.
5779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5780 * @param iReg The register.
5781 */
5782DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5783{
5784 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5785 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5786 {
5787 Assert(iReg < 16);
5788 return &pCtx->aGRegs[iReg].u8;
5789 }
5790 /* high 8-bit register. */
5791 Assert(iReg < 8);
5792 return &pCtx->aGRegs[iReg & 3].bHi;
5793}
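/* Mapping recap for iemGRegRefU8: without a REX prefix, encodings 4-7 select
   the legacy high-byte registers AH, CH, DH and BH (the high byte of regs
   0-3); with any REX prefix present they select SPL, BPL, SIL and DIL instead,
   and encodings 8-15 reach R8B-R15B. */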
5794
5795
5796/**
5797 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5798 *
5799 * @returns Register reference.
5800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5801 * @param iReg The register.
5802 */
5803DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5804{
5805 Assert(iReg < 16);
5806 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5807 return &pCtx->aGRegs[iReg].u16;
5808}
5809
5810
5811/**
5812 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5813 *
5814 * @returns Register reference.
5815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5816 * @param iReg The register.
5817 */
5818DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5819{
5820 Assert(iReg < 16);
5821 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5822 return &pCtx->aGRegs[iReg].u32;
5823}
5824
5825
5826/**
5827 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5828 *
5829 * @returns Register reference.
5830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5831 * @param iReg The register.
5832 */
5833DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5834{
5835 Assert(iReg < 16);
5836 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5837 return &pCtx->aGRegs[iReg].u64;
5838}
5839
5840
5841/**
5842 * Fetches the value of an 8-bit general purpose register.
5843 *
5844 * @returns The register value.
5845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5846 * @param iReg The register.
5847 */
5848DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5849{
5850 return *iemGRegRefU8(pVCpu, iReg);
5851}
5852
5853
5854/**
5855 * Fetches the value of a 16-bit general purpose register.
5856 *
5857 * @returns The register value.
5858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5859 * @param iReg The register.
5860 */
5861DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5862{
5863 Assert(iReg < 16);
5864 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5865}
5866
5867
5868/**
5869 * Fetches the value of a 32-bit general purpose register.
5870 *
5871 * @returns The register value.
5872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5873 * @param iReg The register.
5874 */
5875DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5876{
5877 Assert(iReg < 16);
5878 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5879}
5880
5881
5882/**
5883 * Fetches the value of a 64-bit general purpose register.
5884 *
5885 * @returns The register value.
5886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5887 * @param iReg The register.
5888 */
5889DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5890{
5891 Assert(iReg < 16);
5892 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5893}
5894
5895
5896/**
5897 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5898 *
5899 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5900 * segment limit.
5901 *
5902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5903 * @param offNextInstr The offset of the next instruction.
5904 */
5905IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5906{
5907 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5908 switch (pVCpu->iem.s.enmEffOpSize)
5909 {
5910 case IEMMODE_16BIT:
5911 {
5912 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5913 if ( uNewIp > pCtx->cs.u32Limit
5914 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5915 return iemRaiseGeneralProtectionFault0(pVCpu);
5916 pCtx->rip = uNewIp;
5917 break;
5918 }
5919
5920 case IEMMODE_32BIT:
5921 {
5922 Assert(pCtx->rip <= UINT32_MAX);
5923 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5924
5925 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5926 if (uNewEip > pCtx->cs.u32Limit)
5927 return iemRaiseGeneralProtectionFault0(pVCpu);
5928 pCtx->rip = uNewEip;
5929 break;
5930 }
5931
5932 case IEMMODE_64BIT:
5933 {
5934 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5935
5936 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5937 if (!IEM_IS_CANONICAL(uNewRip))
5938 return iemRaiseGeneralProtectionFault0(pVCpu);
5939 pCtx->rip = uNewRip;
5940 break;
5941 }
5942
5943 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5944 }
5945
5946 pCtx->eflags.Bits.u1RF = 0;
5947
5948#ifndef IEM_WITH_CODE_TLB
5949 /* Flush the prefetch buffer. */
5950 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5951#endif
5952
5953 return VINF_SUCCESS;
5954}
5955
5956
5957/**
5958 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5959 *
5960 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5961 * segment limit.
5962 *
5963 * @returns Strict VBox status code.
5964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5965 * @param offNextInstr The offset of the next instruction.
5966 */
5967IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5968{
5969 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5970 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5971
5972 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5973 if ( uNewIp > pCtx->cs.u32Limit
5974 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5975 return iemRaiseGeneralProtectionFault0(pVCpu);
5976 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5977 pCtx->rip = uNewIp;
5978 pCtx->eflags.Bits.u1RF = 0;
5979
5980#ifndef IEM_WITH_CODE_TLB
5981 /* Flush the prefetch buffer. */
5982 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5983#endif
5984
5985 return VINF_SUCCESS;
5986}
5987
5988
5989/**
5990 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5991 *
5992 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5993 * segment limit.
5994 *
5995 * @returns Strict VBox status code.
5996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5997 * @param offNextInstr The offset of the next instruction.
5998 */
5999IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6000{
6001 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6002 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6003
6004 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6005 {
6006 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6007
6008 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6009 if (uNewEip > pCtx->cs.u32Limit)
6010 return iemRaiseGeneralProtectionFault0(pVCpu);
6011 pCtx->rip = uNewEip;
6012 }
6013 else
6014 {
6015 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6016
6017 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6018 if (!IEM_IS_CANONICAL(uNewRip))
6019 return iemRaiseGeneralProtectionFault0(pVCpu);
6020 pCtx->rip = uNewRip;
6021 }
6022 pCtx->eflags.Bits.u1RF = 0;
6023
6024#ifndef IEM_WITH_CODE_TLB
6025 /* Flush the prefetch buffer. */
6026 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6027#endif
6028
6029 return VINF_SUCCESS;
6030}
6031
6032
6033/**
6034 * Performs a near jump to the specified address.
6035 *
6036 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6037 * segment limit.
6038 *
6039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6040 * @param uNewRip The new RIP value.
6041 */
6042IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6043{
6044 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6045 switch (pVCpu->iem.s.enmEffOpSize)
6046 {
6047 case IEMMODE_16BIT:
6048 {
6049 Assert(uNewRip <= UINT16_MAX);
6050 if ( uNewRip > pCtx->cs.u32Limit
6051 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6052 return iemRaiseGeneralProtectionFault0(pVCpu);
6053 /** @todo Test 16-bit jump in 64-bit mode. */
6054 pCtx->rip = uNewRip;
6055 break;
6056 }
6057
6058 case IEMMODE_32BIT:
6059 {
6060 Assert(uNewRip <= UINT32_MAX);
6061 Assert(pCtx->rip <= UINT32_MAX);
6062 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6063
6064 if (uNewRip > pCtx->cs.u32Limit)
6065 return iemRaiseGeneralProtectionFault0(pVCpu);
6066 pCtx->rip = uNewRip;
6067 break;
6068 }
6069
6070 case IEMMODE_64BIT:
6071 {
6072 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6073
6074 if (!IEM_IS_CANONICAL(uNewRip))
6075 return iemRaiseGeneralProtectionFault0(pVCpu);
6076 pCtx->rip = uNewRip;
6077 break;
6078 }
6079
6080 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6081 }
6082
6083 pCtx->eflags.Bits.u1RF = 0;
6084
6085#ifndef IEM_WITH_CODE_TLB
6086 /* Flush the prefetch buffer. */
6087 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6088#endif
6089
6090 return VINF_SUCCESS;
6091}
6092
6093
6094/**
6095 * Get the address of the top of the stack.
6096 *
6097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6098 * @param pCtx The CPU context from which SP/ESP/RSP should be
6099 * read.
6100 */
6101DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6102{
6103 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6104 return pCtx->rsp;
6105 if (pCtx->ss.Attr.n.u1DefBig)
6106 return pCtx->esp;
6107 return pCtx->sp;
6108}
6109
6110
6111/**
6112 * Updates the RIP/EIP/IP to point to the next instruction.
6113 *
6114 * This function leaves the EFLAGS.RF flag alone.
6115 *
6116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6117 * @param cbInstr The number of bytes to add.
6118 */
6119IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6120{
6121 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6122 switch (pVCpu->iem.s.enmCpuMode)
6123 {
6124 case IEMMODE_16BIT:
6125 Assert(pCtx->rip <= UINT16_MAX);
6126 pCtx->eip += cbInstr;
6127 pCtx->eip &= UINT32_C(0xffff);
6128 break;
6129
6130 case IEMMODE_32BIT:
6131 pCtx->eip += cbInstr;
6132 Assert(pCtx->rip <= UINT32_MAX);
6133 break;
6134
6135 case IEMMODE_64BIT:
6136 pCtx->rip += cbInstr;
6137 break;
6138 default: AssertFailed();
6139 }
6140}
6141
6142
6143#if 0
6144/**
6145 * Updates the RIP/EIP/IP to point to the next instruction.
6146 *
6147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6148 */
6149IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6150{
6151 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6152}
6153#endif
6154
6155
6156
6157/**
6158 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6159 *
6160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6161 * @param cbInstr The number of bytes to add.
6162 */
6163IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6164{
6165 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6166
6167 pCtx->eflags.Bits.u1RF = 0;
6168
6169 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6170#if ARCH_BITS >= 64
6171 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6172 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6173 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6174#else
6175 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6176 pCtx->rip += cbInstr;
6177 else
6178 {
6179 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6180 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6181 }
6182#endif
6183}
6184
6185
6186/**
6187 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6188 *
6189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6190 */
6191IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6192{
6193 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6194}
6195
6196
6197/**
6198 * Adds to the stack pointer.
6199 *
6200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6201 * @param pCtx The CPU context in which SP/ESP/RSP should be
6202 * updated.
6203 * @param cbToAdd The number of bytes to add (8-bit!).
6204 */
6205DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6206{
6207 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6208 pCtx->rsp += cbToAdd;
6209 else if (pCtx->ss.Attr.n.u1DefBig)
6210 pCtx->esp += cbToAdd;
6211 else
6212 pCtx->sp += cbToAdd;
6213}
6214
6215
6216/**
6217 * Subtracts from the stack pointer.
6218 *
6219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6220 * @param pCtx The CPU context in which SP/ESP/RSP should be
6221 * updated.
6222 * @param cbToSub The number of bytes to subtract (8-bit!).
6223 */
6224DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6225{
6226 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6227 pCtx->rsp -= cbToSub;
6228 else if (pCtx->ss.Attr.n.u1DefBig)
6229 pCtx->esp -= cbToSub;
6230 else
6231 pCtx->sp -= cbToSub;
6232}
6233
6234
6235/**
6236 * Adds to the temporary stack pointer.
6237 *
6238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6239 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6240 * @param cbToAdd The number of bytes to add (16-bit).
6241 * @param pCtx Where to get the current stack mode.
6242 */
6243DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6244{
6245 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6246 pTmpRsp->u += cbToAdd;
6247 else if (pCtx->ss.Attr.n.u1DefBig)
6248 pTmpRsp->DWords.dw0 += cbToAdd;
6249 else
6250 pTmpRsp->Words.w0 += cbToAdd;
6251}
6252
6253
6254/**
6255 * Subtracts from the temporary stack pointer.
6256 *
6257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6258 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6259 * @param cbToSub The number of bytes to subtract.
6260 * @param pCtx Where to get the current stack mode.
6261 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6262 * expecting that.
6263 */
6264DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6265{
6266 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6267 pTmpRsp->u -= cbToSub;
6268 else if (pCtx->ss.Attr.n.u1DefBig)
6269 pTmpRsp->DWords.dw0 -= cbToSub;
6270 else
6271 pTmpRsp->Words.w0 -= cbToSub;
6272}
6273
6274
6275/**
6276 * Calculates the effective stack address for a push of the specified size as
6277 * well as the new RSP value (upper bits may be masked).
6278 *
6279 * @returns Effective stack address for the push.
6280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6281 * @param pCtx Where to get the current stack mode.
6282 * @param cbItem The size of the stack item to push.
6283 * @param puNewRsp Where to return the new RSP value.
6284 */
6285DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6286{
6287 RTUINT64U uTmpRsp;
6288 RTGCPTR GCPtrTop;
6289 uTmpRsp.u = pCtx->rsp;
6290
6291 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6292 GCPtrTop = uTmpRsp.u -= cbItem;
6293 else if (pCtx->ss.Attr.n.u1DefBig)
6294 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6295 else
6296 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6297 *puNewRsp = uTmpRsp.u;
6298 return GCPtrTop;
6299}
6300
6301
6302/**
6303 * Gets the current stack pointer and calculates the value after a pop of the
6304 * specified size.
6305 *
6306 * @returns Current stack pointer.
6307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6308 * @param pCtx Where to get the current stack mode.
6309 * @param cbItem The size of the stack item to pop.
6310 * @param puNewRsp Where to return the new RSP value.
6311 */
6312DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6313{
6314 RTUINT64U uTmpRsp;
6315 RTGCPTR GCPtrTop;
6316 uTmpRsp.u = pCtx->rsp;
6317
6318 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6319 {
6320 GCPtrTop = uTmpRsp.u;
6321 uTmpRsp.u += cbItem;
6322 }
6323 else if (pCtx->ss.Attr.n.u1DefBig)
6324 {
6325 GCPtrTop = uTmpRsp.DWords.dw0;
6326 uTmpRsp.DWords.dw0 += cbItem;
6327 }
6328 else
6329 {
6330 GCPtrTop = uTmpRsp.Words.w0;
6331 uTmpRsp.Words.w0 += cbItem;
6332 }
6333 *puNewRsp = uTmpRsp.u;
6334 return GCPtrTop;
6335}
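/*
 * Illustrative sketch, not part of the original source: the push/pop helpers
 * above only operate on the part of RSP selected by the current stack width,
 * so a 16-bit stack wraps within SP while the upper bits of RSP are untouched.
 * The function name below is made up for the example; it only relies on the
 * RTUINT64U type already used in this file.
 */
#if 0 /* example only */
static void iemExampleStackWrap(void)
{
    RTUINT64U uRsp;
    uRsp.u = UINT64_C(0x0000123400000002);              /* SS.Attr.D=0: only Words.w0 (SP=0x0002) is in play. */
    uRsp.Words.w0 -= 4;                                 /* A 4-byte push wraps SP to 0xfffe... */
    Assert(uRsp.u == UINT64_C(0x000012340000fffe));     /* ...while the upper bits stay put. */
}
#endif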
6336
6337
6338/**
6339 * Calculates the effective stack address for a push of the specified size as
6340 * well as the new temporary RSP value (upper bits may be masked).
6341 *
6342 * @returns Effective stack address for the push.
6343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6344 * @param pCtx Where to get the current stack mode.
6345 * @param pTmpRsp The temporary stack pointer. This is updated.
6346 * @param cbItem The size of the stack item to push.
6347 */
6348DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6349{
6350 RTGCPTR GCPtrTop;
6351
6352 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6353 GCPtrTop = pTmpRsp->u -= cbItem;
6354 else if (pCtx->ss.Attr.n.u1DefBig)
6355 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6356 else
6357 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6358 return GCPtrTop;
6359}
6360
6361
6362/**
6363 * Gets the effective stack address for a pop of the specified size and
6364 * calculates and updates the temporary RSP.
6365 *
6366 * @returns Current stack pointer.
6367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6368 * @param pCtx Where to get the current stack mode.
6369 * @param pTmpRsp The temporary stack pointer. This is updated.
6370 * @param cbItem The size of the stack item to pop.
6371 */
6372DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6373{
6374 RTGCPTR GCPtrTop;
6375 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6376 {
6377 GCPtrTop = pTmpRsp->u;
6378 pTmpRsp->u += cbItem;
6379 }
6380 else if (pCtx->ss.Attr.n.u1DefBig)
6381 {
6382 GCPtrTop = pTmpRsp->DWords.dw0;
6383 pTmpRsp->DWords.dw0 += cbItem;
6384 }
6385 else
6386 {
6387 GCPtrTop = pTmpRsp->Words.w0;
6388 pTmpRsp->Words.w0 += cbItem;
6389 }
6390 return GCPtrTop;
6391}
6392
6393/** @} */
6394
6395
6396/** @name FPU access and helpers.
6397 *
6398 * @{
6399 */
6400
6401
6402/**
6403 * Hook for preparing to use the host FPU.
6404 *
6405 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6406 *
6407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6408 */
6409DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6410{
6411#ifdef IN_RING3
6412 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6413#else
6414 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6415#endif
6416}
6417
6418
6419/**
6420 * Hook for preparing to use the host FPU for SSE.
6421 *
6422 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6423 *
6424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6425 */
6426DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6427{
6428 iemFpuPrepareUsage(pVCpu);
6429}
6430
6431
6432/**
6433 * Hook for actualizing the guest FPU state before the interpreter reads it.
6434 *
6435 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6436 *
6437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6438 */
6439DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6440{
6441#ifdef IN_RING3
6442 NOREF(pVCpu);
6443#else
6444 CPUMRZFpuStateActualizeForRead(pVCpu);
6445#endif
6446}
6447
6448
6449/**
6450 * Hook for actualizing the guest FPU state before the interpreter changes it.
6451 *
6452 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6453 *
6454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6455 */
6456DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6457{
6458#ifdef IN_RING3
6459 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6460#else
6461 CPUMRZFpuStateActualizeForChange(pVCpu);
6462#endif
6463}
6464
6465
6466/**
6467 * Hook for actualizing the guest XMM0..15 register state for read only.
6468 *
6469 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6470 *
6471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6472 */
6473DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6474{
6475#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6476 NOREF(pVCpu);
6477#else
6478 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6479#endif
6480}
6481
6482
6483/**
6484 * Hook for actualizing the guest XMM0..15 register state for read+write.
6485 *
6486 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6487 *
6488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6489 */
6490DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6491{
6492#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6493 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6494#else
6495 CPUMRZFpuStateActualizeForChange(pVCpu);
6496#endif
6497}
6498
6499
6500/**
6501 * Stores a QNaN value into a FPU register.
6502 *
6503 * @param pReg Pointer to the register.
6504 */
6505DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6506{
6507 pReg->au32[0] = UINT32_C(0x00000000);
6508 pReg->au32[1] = UINT32_C(0xc0000000);
6509 pReg->au16[4] = UINT16_C(0xffff);
6510}
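/* Note: the pattern stored above is the x87 "real indefinite" QNaN (sign=1,
   exponent=0x7fff, mantissa=0xc000000000000000), i.e. the 80-bit value
   ffff'c0000000'00000000 which the FPU itself produces for masked invalid
   operations. */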
6511
6512
6513/**
6514 * Updates the FOP, FPU.CS and FPUIP registers.
6515 *
6516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6517 * @param pCtx The CPU context.
6518 * @param pFpuCtx The FPU context.
6519 */
6520DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6521{
6522 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6523 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6524    /** @todo x87.CS and FPUIP need to be kept separately. */
6525 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6526 {
6527        /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
6528         * are handled in real mode, based on the fnsave and fnstenv images. */
6529 pFpuCtx->CS = 0;
6530 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6531 }
6532 else
6533 {
6534 pFpuCtx->CS = pCtx->cs.Sel;
6535 pFpuCtx->FPUIP = pCtx->rip;
6536 }
6537}
6538
6539
6540/**
6541 * Updates the x87.DS and FPUDP registers.
6542 *
6543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6544 * @param pCtx The CPU context.
6545 * @param pFpuCtx The FPU context.
6546 * @param iEffSeg The effective segment register.
6547 * @param GCPtrEff The effective address relative to @a iEffSeg.
6548 */
6549DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6550{
6551 RTSEL sel;
6552 switch (iEffSeg)
6553 {
6554 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6555 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6556 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6557 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6558 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6559 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6560 default:
6561 AssertMsgFailed(("%d\n", iEffSeg));
6562 sel = pCtx->ds.Sel;
6563 }
6564    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6565 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6566 {
6567 pFpuCtx->DS = 0;
6568 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6569 }
6570 else
6571 {
6572 pFpuCtx->DS = sel;
6573 pFpuCtx->FPUDP = GCPtrEff;
6574 }
6575}
6576
6577
6578/**
6579 * Rotates the stack registers in the push direction.
6580 *
6581 * @param pFpuCtx The FPU context.
6582 * @remarks This is a complete waste of time, but fxsave stores the registers in
6583 * stack order.
6584 */
6585DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6586{
6587 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6588 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6589 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6590 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6591 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6592 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6593 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6594 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6595 pFpuCtx->aRegs[0].r80 = r80Tmp;
6596}
6597
6598
6599/**
6600 * Rotates the stack registers in the pop direction.
6601 *
6602 * @param pFpuCtx The FPU context.
6603 * @remarks This is a complete waste of time, but fxsave stores the registers in
6604 * stack order.
6605 */
6606DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6607{
6608 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6609 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6610 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6611 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6612 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6613 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6614 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6615 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6616 pFpuCtx->aRegs[7].r80 = r80Tmp;
6617}
6618
6619
6620/**
6621 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6622 * exception prevents it.
6623 *
6624 * @param pResult The FPU operation result to push.
6625 * @param pFpuCtx The FPU context.
6626 */
6627IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6628{
6629 /* Update FSW and bail if there are pending exceptions afterwards. */
6630 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6631 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6632 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6633 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6634 {
6635 pFpuCtx->FSW = fFsw;
6636 return;
6637 }
6638
6639 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6640 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6641 {
6642 /* All is fine, push the actual value. */
6643 pFpuCtx->FTW |= RT_BIT(iNewTop);
6644 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6645 }
6646 else if (pFpuCtx->FCW & X86_FCW_IM)
6647 {
6648 /* Masked stack overflow, push QNaN. */
6649 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6650 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6651 }
6652 else
6653 {
6654 /* Raise stack overflow, don't push anything. */
6655 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6656 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6657 return;
6658 }
6659
6660 fFsw &= ~X86_FSW_TOP_MASK;
6661 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6662 pFpuCtx->FSW = fFsw;
6663
6664 iemFpuRotateStackPush(pFpuCtx);
6665}
6666
6667
6668/**
6669 * Stores a result in a FPU register and updates the FSW and FTW.
6670 *
6671 * @param pFpuCtx The FPU context.
6672 * @param pResult The result to store.
6673 * @param iStReg Which FPU register to store it in.
6674 */
6675IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6676{
6677 Assert(iStReg < 8);
6678 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6679 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6680 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6681 pFpuCtx->FTW |= RT_BIT(iReg);
6682 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6683}
6684
6685
6686/**
6687 * Only updates the FPU status word (FSW) with the result of the current
6688 * instruction.
6689 *
6690 * @param pFpuCtx The FPU context.
6691 * @param u16FSW The FSW output of the current instruction.
6692 */
6693IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6694{
6695 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6696 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6697}
6698
6699
6700/**
6701 * Pops one item off the FPU stack if no pending exception prevents it.
6702 *
6703 * @param pFpuCtx The FPU context.
6704 */
6705IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6706{
6707 /* Check pending exceptions. */
6708 uint16_t uFSW = pFpuCtx->FSW;
6709 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6710 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6711 return;
6712
6713    /* Advance TOP by one, modulo 8 (popping increments TOP). */
6714 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6715 uFSW &= ~X86_FSW_TOP_MASK;
6716 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6717 pFpuCtx->FSW = uFSW;
6718
6719 /* Mark the previous ST0 as empty. */
6720 iOldTop >>= X86_FSW_TOP_SHIFT;
6721 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6722
6723 /* Rotate the registers. */
6724 iemFpuRotateStackPop(pFpuCtx);
6725}
6726
6727
6728/**
6729 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6730 *
6731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6732 * @param pResult The FPU operation result to push.
6733 */
6734IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6735{
6736 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6737 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6738 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6739 iemFpuMaybePushResult(pResult, pFpuCtx);
6740}
6741
6742
6743/**
6744 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6745 * and sets FPUDP and FPUDS.
6746 *
6747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6748 * @param pResult The FPU operation result to push.
6749 * @param iEffSeg The effective segment register.
6750 * @param GCPtrEff The effective address relative to @a iEffSeg.
6751 */
6752IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6753{
6754 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6755 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6756 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6757 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6758 iemFpuMaybePushResult(pResult, pFpuCtx);
6759}
6760
6761
6762/**
6763 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6764 * unless a pending exception prevents it.
6765 *
6766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6767 * @param pResult The FPU operation result to store and push.
6768 */
6769IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6770{
6771 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6772 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6773 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6774
6775 /* Update FSW and bail if there are pending exceptions afterwards. */
6776 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6777 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6778 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6779 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6780 {
6781 pFpuCtx->FSW = fFsw;
6782 return;
6783 }
6784
6785 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6786 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6787 {
6788 /* All is fine, push the actual value. */
6789 pFpuCtx->FTW |= RT_BIT(iNewTop);
6790 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6791 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6792 }
6793 else if (pFpuCtx->FCW & X86_FCW_IM)
6794 {
6795 /* Masked stack overflow, push QNaN. */
6796 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6797 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6798 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6799 }
6800 else
6801 {
6802 /* Raise stack overflow, don't push anything. */
6803 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6804 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6805 return;
6806 }
6807
6808 fFsw &= ~X86_FSW_TOP_MASK;
6809 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6810 pFpuCtx->FSW = fFsw;
6811
6812 iemFpuRotateStackPush(pFpuCtx);
6813}
6814
6815
6816/**
6817 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6818 * FOP.
6819 *
6820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6821 * @param pResult The result to store.
6822 * @param iStReg Which FPU register to store it in.
6823 */
6824IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6825{
6826 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6827 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6828 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6829 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6830}
6831
6832
6833/**
6834 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6835 * FOP, and then pops the stack.
6836 *
6837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6838 * @param pResult The result to store.
6839 * @param iStReg Which FPU register to store it in.
6840 */
6841IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6842{
6843 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6844 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6845 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6846 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6847 iemFpuMaybePopOne(pFpuCtx);
6848}
6849
6850
6851/**
6852 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6853 * FPUDP, and FPUDS.
6854 *
6855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6856 * @param pResult The result to store.
6857 * @param iStReg Which FPU register to store it in.
6858 * @param iEffSeg The effective memory operand selector register.
6859 * @param GCPtrEff The effective memory operand offset.
6860 */
6861IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6862 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6863{
6864 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6865 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6866 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6867 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6868 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6869}
6870
6871
6872/**
6873 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6874 * FPUDP, and FPUDS, and then pops the stack.
6875 *
6876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6877 * @param pResult The result to store.
6878 * @param iStReg Which FPU register to store it in.
6879 * @param iEffSeg The effective memory operand selector register.
6880 * @param GCPtrEff The effective memory operand offset.
6881 */
6882IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6883 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6884{
6885 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6886 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6887 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6888 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6889 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6890 iemFpuMaybePopOne(pFpuCtx);
6891}
6892
6893
6894/**
6895 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6896 *
6897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6898 */
6899IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6900{
6901 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6902 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6903 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6904}
6905
6906
6907/**
6908 * Marks the specified stack register as free (for FFREE).
6909 *
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 * @param iStReg The register to free.
6912 */
6913IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6914{
6915 Assert(iStReg < 8);
6916 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6917 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6918 pFpuCtx->FTW &= ~RT_BIT(iReg);
6919}
6920
6921
6922/**
6923 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6924 *
6925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6926 */
6927IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6928{
6929 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6930 uint16_t uFsw = pFpuCtx->FSW;
6931 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6932 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6933 uFsw &= ~X86_FSW_TOP_MASK;
6934 uFsw |= uTop;
6935 pFpuCtx->FSW = uFsw;
6936}
6937
6938
6939/**
6940 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6941 *
6942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6943 */
6944IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6945{
6946 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6947 uint16_t uFsw = pFpuCtx->FSW;
6948 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6949 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6950 uFsw &= ~X86_FSW_TOP_MASK;
6951 uFsw |= uTop;
6952 pFpuCtx->FSW = uFsw;
6953}
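/*
 * Illustrative sketch, not part of the original source: TOP lives in FSW bits
 * 13:11, so the +/-1 adjustments above are done modulo 8 by adding 1 or 7
 * before re-masking.  The function name below is made up for the example; it
 * only uses macros already used in this file.
 */
#if 0 /* example only */
static void iemExampleTopArithmetic(void)
{
    uint16_t uTop = 0;                                            /* TOP = 0 */
    uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;  /* "-1": wraps to TOP = 7 */
    uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;  /* "+1": back to TOP = 0 */
    NOREF(uTop);
}
#endif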
6954
6955
6956/**
6957 * Updates the FSW, FOP, FPUIP, and FPUCS.
6958 *
6959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6960 * @param u16FSW The FSW from the current instruction.
6961 */
6962IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6963{
6964 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6965 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6966 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6967 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6968}
6969
6970
6971/**
6972 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6973 *
6974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6975 * @param u16FSW The FSW from the current instruction.
6976 */
6977IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6978{
6979 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6980 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6981 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6982 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6983 iemFpuMaybePopOne(pFpuCtx);
6984}
6985
6986
6987/**
6988 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6989 *
6990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6991 * @param u16FSW The FSW from the current instruction.
6992 * @param iEffSeg The effective memory operand selector register.
6993 * @param GCPtrEff The effective memory operand offset.
6994 */
6995IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6996{
6997 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6998 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6999 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7000 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7001 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7002}
7003
7004
7005/**
7006 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7007 *
7008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7009 * @param u16FSW The FSW from the current instruction.
7010 */
7011IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7012{
7013 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7014 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7015 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7016 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7017 iemFpuMaybePopOne(pFpuCtx);
7018 iemFpuMaybePopOne(pFpuCtx);
7019}
7020
7021
7022/**
7023 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7024 *
7025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7026 * @param u16FSW The FSW from the current instruction.
7027 * @param iEffSeg The effective memory operand selector register.
7028 * @param GCPtrEff The effective memory operand offset.
7029 */
7030IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7031{
7032 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7033 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7034 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7035 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7036 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7037 iemFpuMaybePopOne(pFpuCtx);
7038}
7039
7040
7041/**
7042 * Worker routine for raising an FPU stack underflow exception.
7043 *
7044 * @param pFpuCtx The FPU context.
7045 * @param iStReg The stack register being accessed.
7046 */
7047IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7048{
7049 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7050 if (pFpuCtx->FCW & X86_FCW_IM)
7051 {
7052 /* Masked underflow. */
7053 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7054 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7055 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7056 if (iStReg != UINT8_MAX)
7057 {
7058 pFpuCtx->FTW |= RT_BIT(iReg);
7059 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7060 }
7061 }
7062 else
7063 {
7064 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7065 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7066 }
7067}
7068
7069
7070/**
7071 * Raises a FPU stack underflow exception.
7072 *
7073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7074 * @param iStReg The destination register that should be loaded
7075 * with QNaN if \#IS is not masked. Specify
7076 * UINT8_MAX if none (like for fcom).
7077 */
7078DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7079{
7080 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7081 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7082 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7083 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7084}
7085
7086
7087DECL_NO_INLINE(IEM_STATIC, void)
7088iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7089{
7090 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7091 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7092 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7093 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7094 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7095}
7096
7097
7098DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7099{
7100 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7101 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7102 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7103 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7104 iemFpuMaybePopOne(pFpuCtx);
7105}
7106
7107
7108DECL_NO_INLINE(IEM_STATIC, void)
7109iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7110{
7111 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7112 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7113 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7114 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7115 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7116 iemFpuMaybePopOne(pFpuCtx);
7117}
7118
7119
7120DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7121{
7122 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7123 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7124 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7125 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7126 iemFpuMaybePopOne(pFpuCtx);
7127 iemFpuMaybePopOne(pFpuCtx);
7128}
7129
7130
7131DECL_NO_INLINE(IEM_STATIC, void)
7132iemFpuStackPushUnderflow(PVMCPU pVCpu)
7133{
7134 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7135 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7136 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7137
7138 if (pFpuCtx->FCW & X86_FCW_IM)
7139 {
7140        /* Masked underflow - push QNaN. */
7141 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7142 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7143 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7144 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7145 pFpuCtx->FTW |= RT_BIT(iNewTop);
7146 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7147 iemFpuRotateStackPush(pFpuCtx);
7148 }
7149 else
7150 {
7151 /* Exception pending - don't change TOP or the register stack. */
7152 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7153 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7154 }
7155}
7156
7157
7158DECL_NO_INLINE(IEM_STATIC, void)
7159iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7160{
7161 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7162 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7163 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7164
7165 if (pFpuCtx->FCW & X86_FCW_IM)
7166 {
7167 /* Masked overflow - Push QNaN. */
7168        /* Masked underflow - push QNaN. */
7169 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7170 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7171 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7172 pFpuCtx->FTW |= RT_BIT(iNewTop);
7173 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7174 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7175 iemFpuRotateStackPush(pFpuCtx);
7176 }
7177 else
7178 {
7179 /* Exception pending - don't change TOP or the register stack. */
7180 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7181 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7182 }
7183}
7184
7185
7186/**
7187 * Worker routine for raising an FPU stack overflow exception on a push.
7188 *
7189 * @param pFpuCtx The FPU context.
7190 */
7191IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7192{
7193 if (pFpuCtx->FCW & X86_FCW_IM)
7194 {
7195 /* Masked overflow. */
7196 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7197 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7198 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7199 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7200 pFpuCtx->FTW |= RT_BIT(iNewTop);
7201 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7202 iemFpuRotateStackPush(pFpuCtx);
7203 }
7204 else
7205 {
7206 /* Exception pending - don't change TOP or the register stack. */
7207 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7208 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7209 }
7210}
7211
7212
7213/**
7214 * Raises a FPU stack overflow exception on a push.
7215 *
7216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7217 */
7218DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7219{
7220 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7221 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7222 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7223 iemFpuStackPushOverflowOnly(pFpuCtx);
7224}
7225
7226
7227/**
7228 * Raises a FPU stack overflow exception on a push with a memory operand.
7229 *
7230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7231 * @param iEffSeg The effective memory operand selector register.
7232 * @param GCPtrEff The effective memory operand offset.
7233 */
7234DECL_NO_INLINE(IEM_STATIC, void)
7235iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7236{
7237 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7238 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7239 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7240 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7241 iemFpuStackPushOverflowOnly(pFpuCtx);
7242}
7243
7244
7245IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7246{
7247 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7248 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7249 if (pFpuCtx->FTW & RT_BIT(iReg))
7250 return VINF_SUCCESS;
7251 return VERR_NOT_FOUND;
7252}
7253
7254
7255IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7256{
7257 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7258 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7259 if (pFpuCtx->FTW & RT_BIT(iReg))
7260 {
7261 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7262 return VINF_SUCCESS;
7263 }
7264 return VERR_NOT_FOUND;
7265}
7266
7267
7268IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7269 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7270{
7271 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7272 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7273 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7274 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7275 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7276 {
7277 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7278 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7279 return VINF_SUCCESS;
7280 }
7281 return VERR_NOT_FOUND;
7282}
7283
7284
7285IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7286{
7287 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7288 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7289 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7290 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7291 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7292 {
7293 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7294 return VINF_SUCCESS;
7295 }
7296 return VERR_NOT_FOUND;
7297}
7298
7299
7300/**
7301 * Updates the FPU exception status after FCW is changed.
7302 *
7303 * @param pFpuCtx The FPU context.
7304 */
7305IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7306{
7307 uint16_t u16Fsw = pFpuCtx->FSW;
7308 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7309 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7310 else
7311 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7312 pFpuCtx->FSW = u16Fsw;
7313}
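/* Worked example: if FSW.IE is pending while FCW.IM is clear, the unmasked
   exception test above is non-zero and ES + B (busy) get set; once the guest
   masks IE again, the same test clears ES and B. */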
7314
7315
7316/**
7317 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7318 *
7319 * @returns The full FTW.
7320 * @param pFpuCtx The FPU context.
7321 */
7322IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7323{
7324 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7325 uint16_t u16Ftw = 0;
7326 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7327 for (unsigned iSt = 0; iSt < 8; iSt++)
7328 {
7329 unsigned const iReg = (iSt + iTop) & 7;
7330 if (!(u8Ftw & RT_BIT(iReg)))
7331 u16Ftw |= 3 << (iReg * 2); /* empty */
7332 else
7333 {
7334 uint16_t uTag;
7335 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7336 if (pr80Reg->s.uExponent == 0x7fff)
7337 uTag = 2; /* Exponent is all 1's => Special. */
7338 else if (pr80Reg->s.uExponent == 0x0000)
7339 {
7340 if (pr80Reg->s.u64Mantissa == 0x0000)
7341 uTag = 1; /* All bits are zero => Zero. */
7342 else
7343 uTag = 2; /* Must be special. */
7344 }
7345 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7346 uTag = 0; /* Valid. */
7347 else
7348 uTag = 2; /* Must be special. */
7349
7350            u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7351 }
7352 }
7353
7354 return u16Ftw;
7355}
7356
7357
7358/**
7359 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7360 *
7361 * @returns The compressed FTW.
7362 * @param u16FullFtw The full FTW to convert.
7363 */
7364IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7365{
7366 uint8_t u8Ftw = 0;
7367 for (unsigned i = 0; i < 8; i++)
7368 {
7369 if ((u16FullFtw & 3) != 3 /*empty*/)
7370 u8Ftw |= RT_BIT(i);
7371 u16FullFtw >>= 2;
7372 }
7373
7374 return u8Ftw;
7375}
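/*
 * Illustrative sketch, not part of the original source: round-tripping the two
 * tag word formats.  With only physical register 7 occupied by a valid value,
 * the full FTW is 0x3fff (tag 00 for reg 7, tag 11 "empty" everywhere else)
 * and compressing it yields 0x80, one bit per non-empty register.  The
 * function name below is made up for the example.
 */
#if 0 /* example only */
static void iemExampleFtwRoundTrip(void)
{
    uint16_t const u16Full       = 0x3fff;  /* reg 7 valid, regs 0..6 empty */
    uint16_t const u16Compressed = iemFpuCompressFtw(u16Full);
    Assert(u16Compressed == 0x80);          /* only bit 7 is set */
}
#endif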
7376
7377/** @} */
7378
7379
7380/** @name Memory access.
7381 *
7382 * @{
7383 */
7384
7385
7386/**
7387 * Updates the IEMCPU::cbWritten counter if applicable.
7388 *
7389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7390 * @param fAccess The access being accounted for.
7391 * @param cbMem The access size.
7392 */
7393DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7394{
7395 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7396 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7397 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7398}
7399
7400
7401/**
7402 * Checks if the given segment can be written to, raising the appropriate
7403 * exception if not.
7404 *
7405 * @returns VBox strict status code.
7406 *
7407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7408 * @param pHid Pointer to the hidden register.
7409 * @param iSegReg The register number.
7410 * @param pu64BaseAddr Where to return the base address to use for the
7411 * segment. (In 64-bit code it may differ from the
7412 * base in the hidden segment.)
7413 */
7414IEM_STATIC VBOXSTRICTRC
7415iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7416{
7417 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7418 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7419 else
7420 {
7421 if (!pHid->Attr.n.u1Present)
7422 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7423
7424 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7425 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7426 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7427 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7428 *pu64BaseAddr = pHid->u64Base;
7429 }
7430 return VINF_SUCCESS;
7431}
7432
7433
7434/**
7435 * Checks if the given segment can be read from, raising the appropriate
7436 * exception if not.
7437 *
7438 * @returns VBox strict status code.
7439 *
7440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7441 * @param pHid Pointer to the hidden register.
7442 * @param iSegReg The register number.
7443 * @param pu64BaseAddr Where to return the base address to use for the
7444 * segment. (In 64-bit code it may differ from the
7445 * base in the hidden segment.)
7446 */
7447IEM_STATIC VBOXSTRICTRC
7448iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7449{
7450 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7451 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7452 else
7453 {
7454 if (!pHid->Attr.n.u1Present)
7455 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7456
7457 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7458 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7459 *pu64BaseAddr = pHid->u64Base;
7460 }
7461 return VINF_SUCCESS;
7462}
7463
7464
7465/**
7466 * Applies the segment limit, base and attributes.
7467 *
7468 * This may raise a \#GP or \#SS.
7469 *
7470 * @returns VBox strict status code.
7471 *
7472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7473 * @param fAccess The kind of access which is being performed.
7474 * @param iSegReg The index of the segment register to apply.
7475 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7476 * TSS, ++).
7477 * @param cbMem The access size.
7478 * @param pGCPtrMem Pointer to the guest memory address to apply
7479 * segmentation to. Input and output parameter.
7480 */
7481IEM_STATIC VBOXSTRICTRC
7482iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7483{
7484 if (iSegReg == UINT8_MAX)
7485 return VINF_SUCCESS;
7486
7487 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7488 switch (pVCpu->iem.s.enmCpuMode)
7489 {
7490 case IEMMODE_16BIT:
7491 case IEMMODE_32BIT:
7492 {
7493 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7494 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7495
7496 if ( pSel->Attr.n.u1Present
7497 && !pSel->Attr.n.u1Unusable)
7498 {
7499 Assert(pSel->Attr.n.u1DescType);
7500 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7501 {
7502 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7503 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7504 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7505
7506 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7507 {
7508 /** @todo CPL check. */
7509 }
7510
7511 /*
7512 * There are two kinds of data selectors, normal and expand down.
7513 */
7514 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7515 {
7516 if ( GCPtrFirst32 > pSel->u32Limit
7517 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7518 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7519 }
7520 else
7521 {
7522 /*
7523 * The upper boundary is defined by the B bit, not the G bit!
7524 */
7525 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7526 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7527 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7528 }
7529 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7530 }
7531 else
7532 {
7533
7534 /*
7535                 * Code selectors can usually be used to read through; writing is
7536                 * only permitted in real and V8086 mode.
7537 */
7538 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7539 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7540 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7541 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7542 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7543
7544 if ( GCPtrFirst32 > pSel->u32Limit
7545 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7546 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7547
7548 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7549 {
7550 /** @todo CPL check. */
7551 }
7552
7553 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7554 }
7555 }
7556 else
7557 return iemRaiseGeneralProtectionFault0(pVCpu);
7558 return VINF_SUCCESS;
7559 }
7560
7561 case IEMMODE_64BIT:
7562 {
7563 RTGCPTR GCPtrMem = *pGCPtrMem;
7564 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7565 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7566
7567 Assert(cbMem >= 1);
7568 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7569 return VINF_SUCCESS;
7570 return iemRaiseGeneralProtectionFault0(pVCpu);
7571 }
7572
7573 default:
7574 AssertFailedReturn(VERR_IEM_IPE_7);
7575 }
7576}
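/* Worked example: for an expand-down data segment the valid offsets lie above
   the limit.  With u32Limit=0x0fff and D/B=1 the check above accepts offsets
   0x1000..0xffffffff and faults on anything at or below 0x0fff; with D/B=0 the
   upper bound shrinks to 0xffff. */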
7577
7578
7579/**
7580 * Translates a virtual address to a physical address and checks if we
7581 * can access the page as specified.
7582 *
7583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7584 * @param GCPtrMem The virtual address.
7585 * @param fAccess The intended access.
7586 * @param pGCPhysMem Where to return the physical address.
7587 */
7588IEM_STATIC VBOXSTRICTRC
7589iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7590{
7591 /** @todo Need a different PGM interface here. We're currently using
7592     * generic / REM interfaces. This won't cut it for R0 & RC. */
7593 RTGCPHYS GCPhys;
7594 uint64_t fFlags;
7595 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7596 if (RT_FAILURE(rc))
7597 {
7598 /** @todo Check unassigned memory in unpaged mode. */
7599 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7600 *pGCPhysMem = NIL_RTGCPHYS;
7601 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7602 }
7603
7604 /* If the page is writable and does not have the no-exec bit set, all
7605 access is allowed. Otherwise we'll have to check more carefully... */
7606 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7607 {
7608 /* Write to read only memory? */
7609 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7610 && !(fFlags & X86_PTE_RW)
7611 && ( pVCpu->iem.s.uCpl == 3
7612 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7613 {
7614 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7615 *pGCPhysMem = NIL_RTGCPHYS;
7616 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7617 }
7618
7619 /* Kernel memory accessed by userland? */
7620 if ( !(fFlags & X86_PTE_US)
7621 && pVCpu->iem.s.uCpl == 3
7622 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7623 {
7624 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7625 *pGCPhysMem = NIL_RTGCPHYS;
7626 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7627 }
7628
7629 /* Executing non-executable memory? */
7630 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7631 && (fFlags & X86_PTE_PAE_NX)
7632 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7633 {
7634 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7635 *pGCPhysMem = NIL_RTGCPHYS;
7636 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7637 VERR_ACCESS_DENIED);
7638 }
7639 }
7640
7641 /*
7642 * Set the dirty / access flags.
7643     * ASSUMES this is set when the address is translated rather than on commit...
7644 */
7645 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7646 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7647 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7648 {
7649 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7650 AssertRC(rc2);
7651 }
7652
7653 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7654 *pGCPhysMem = GCPhys;
7655 return VINF_SUCCESS;
7656}
7657
7658
7659
7660/**
7661 * Maps a physical page.
7662 *
7663 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7665 * @param GCPhysMem The physical address.
7666 * @param fAccess The intended access.
7667 * @param ppvMem Where to return the mapping address.
7668 * @param pLock The PGM lock.
7669 */
7670IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7671{
7672#ifdef IEM_VERIFICATION_MODE_FULL
7673 /* Force the alternative path so we can ignore writes. */
7674 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7675 {
7676 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7677 {
7678 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7679 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7680 if (RT_FAILURE(rc2))
7681 pVCpu->iem.s.fProblematicMemory = true;
7682 }
7683 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7684 }
7685#endif
7686#ifdef IEM_LOG_MEMORY_WRITES
7687 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7688 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7689#endif
7690#ifdef IEM_VERIFICATION_MODE_MINIMAL
7691 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7692#endif
7693
7694    /** @todo This API may require some improvement later. A private deal with PGM
7695     * regarding locking and unlocking needs to be struck. A couple of TLBs
7696 * living in PGM, but with publicly accessible inlined access methods
7697 * could perhaps be an even better solution. */
7698 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7699 GCPhysMem,
7700 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7701 pVCpu->iem.s.fBypassHandlers,
7702 ppvMem,
7703 pLock);
7704 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7705 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7706
7707#ifdef IEM_VERIFICATION_MODE_FULL
7708 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7709 pVCpu->iem.s.fProblematicMemory = true;
7710#endif
7711 return rc;
7712}
7713
7714
7715/**
7716 * Unmap a page previously mapped by iemMemPageMap.
7717 *
7718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7719 * @param GCPhysMem The physical address.
7720 * @param fAccess The intended access.
7721 * @param pvMem What iemMemPageMap returned.
7722 * @param pLock The PGM lock.
7723 */
7724DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7725{
7726 NOREF(pVCpu);
7727 NOREF(GCPhysMem);
7728 NOREF(fAccess);
7729 NOREF(pvMem);
7730 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7731}
7732
7733
7734/**
7735 * Looks up a memory mapping entry.
7736 *
7737 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7739 * @param pvMem The memory address.
7740 * @param fAccess The access (what + type) to look up.
7741 */
7742DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7743{
7744 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7745 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7746 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7747 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7748 return 0;
7749 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7750 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7751 return 1;
7752 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7753 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7754 return 2;
7755 return VERR_NOT_FOUND;
7756}
7757
7758
7759/**
7760 * Finds a free memmap entry when using iNextMapping doesn't work.
7761 *
7762 * @returns Memory mapping index, 1024 on failure.
7763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7764 */
7765IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7766{
7767 /*
7768 * The easy case.
7769 */
7770 if (pVCpu->iem.s.cActiveMappings == 0)
7771 {
7772 pVCpu->iem.s.iNextMapping = 1;
7773 return 0;
7774 }
7775
7776 /* There should be enough mappings for all instructions. */
7777 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7778
7779 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7780 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7781 return i;
7782
7783 AssertFailedReturn(1024);
7784}
7785
7786
7787/**
7788 * Commits a bounce buffer that needs writing back and unmaps it.
7789 *
7790 * @returns Strict VBox status code.
7791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7792 * @param iMemMap The index of the buffer to commit.
7793 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
7794 * Always false in ring-3, obviously.
7795 */
7796IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7797{
7798 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7799 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7800#ifdef IN_RING3
7801 Assert(!fPostponeFail);
7802 RT_NOREF_PV(fPostponeFail);
7803#endif
7804
7805 /*
7806 * Do the writing.
7807 */
7808#ifndef IEM_VERIFICATION_MODE_MINIMAL
7809 PVM pVM = pVCpu->CTX_SUFF(pVM);
7810 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7811 && !IEM_VERIFICATION_ENABLED(pVCpu))
7812 {
7813 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7814 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7815 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7816 if (!pVCpu->iem.s.fBypassHandlers)
7817 {
7818 /*
7819 * Carefully and efficiently dealing with access handler return
7820 * codes make this a little bloated.
7821             * codes makes this a little bloated.
7822 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7824 pbBuf,
7825 cbFirst,
7826 PGMACCESSORIGIN_IEM);
7827 if (rcStrict == VINF_SUCCESS)
7828 {
7829 if (cbSecond)
7830 {
7831 rcStrict = PGMPhysWrite(pVM,
7832 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7833 pbBuf + cbFirst,
7834 cbSecond,
7835 PGMACCESSORIGIN_IEM);
7836 if (rcStrict == VINF_SUCCESS)
7837 { /* nothing */ }
7838 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7839 {
7840 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7841 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7842 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7843 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7844 }
7845# ifndef IN_RING3
7846 else if (fPostponeFail)
7847 {
7848 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7849 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7850 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7851 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7852 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7853 return iemSetPassUpStatus(pVCpu, rcStrict);
7854 }
7855# endif
7856 else
7857 {
7858 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7859 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7860 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7861 return rcStrict;
7862 }
7863 }
7864 }
7865 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7866 {
7867 if (!cbSecond)
7868 {
7869 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7870 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7871 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7872 }
7873 else
7874 {
7875 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7876 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7877 pbBuf + cbFirst,
7878 cbSecond,
7879 PGMACCESSORIGIN_IEM);
7880 if (rcStrict2 == VINF_SUCCESS)
7881 {
7882 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7885 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7886 }
7887 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7888 {
7889 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7890 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7891 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7892 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7893 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7894 }
7895# ifndef IN_RING3
7896 else if (fPostponeFail)
7897 {
7898 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7899 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7900 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7902 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7903 return iemSetPassUpStatus(pVCpu, rcStrict);
7904 }
7905# endif
7906 else
7907 {
7908 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7909 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7910 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7911 return rcStrict2;
7912 }
7913 }
7914 }
7915# ifndef IN_RING3
7916 else if (fPostponeFail)
7917 {
7918 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7919 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7920 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7921 if (!cbSecond)
7922 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7923 else
7924 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7925 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7926 return iemSetPassUpStatus(pVCpu, rcStrict);
7927 }
7928# endif
7929 else
7930 {
7931 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7933 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7934 return rcStrict;
7935 }
7936 }
7937 else
7938 {
7939 /*
7940 * No access handlers, much simpler.
7941 */
7942 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7943 if (RT_SUCCESS(rc))
7944 {
7945 if (cbSecond)
7946 {
7947 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7948 if (RT_SUCCESS(rc))
7949 { /* likely */ }
7950 else
7951 {
7952 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7954 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7955 return rc;
7956 }
7957 }
7958 }
7959 else
7960 {
7961 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7962 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7963 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7964 return rc;
7965 }
7966 }
7967 }
7968#endif
7969
7970#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7971 /*
7972 * Record the write(s).
7973 */
7974 if (!pVCpu->iem.s.fNoRem)
7975 {
7976 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7977 if (pEvtRec)
7978 {
7979 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7980 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7981 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7982 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7983 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7984 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7985 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7986 }
7987 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7988 {
7989 pEvtRec = iemVerifyAllocRecord(pVCpu);
7990 if (pEvtRec)
7991 {
7992 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7993 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7994 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7995 memcpy(pEvtRec->u.RamWrite.ab,
7996 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7997 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7998 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7999 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8000 }
8001 }
8002 }
8003#endif
8004#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8005 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8006 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8007 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8008 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8009 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8010 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8011
8012 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8013 g_cbIemWrote = cbWrote;
8014 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8015#endif
8016
8017 /*
8018 * Free the mapping entry.
8019 */
8020 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8021 Assert(pVCpu->iem.s.cActiveMappings != 0);
8022 pVCpu->iem.s.cActiveMappings--;
8023 return VINF_SUCCESS;
8024}
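
/*
 * Illustrative sketch (hypothetical helper, not part of IEM): how the
 * fPostponeFail path above is meant to be consumed.  Outside ring-3 the failed
 * physical write is recorded via IEM_ACCESS_PENDING_R3_WRITE_1ST/_2ND and
 * VMCPU_FF_IEM is raised, so the caller returns to ring-3 where the pending
 * writes can be retried against PGM.
 */
#if 0 /* example only */
IEM_STATIC bool iemExampleHasPendingR3Writes(PVMCPU pVCpu)
{
    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        return false;
    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
        if (pVCpu->iem.s.aMemMappings[i].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
            return true; /* the data and physical addresses are still in aBounceBuffers[i] / aMemBbMappings[i] */
    return false;
}
#endif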
8025
8026
8027/**
8028 * iemMemMap worker that deals with a request crossing pages.
8029 */
8030IEM_STATIC VBOXSTRICTRC
8031iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8032{
8033 /*
8034 * Do the address translations.
8035 */
8036 RTGCPHYS GCPhysFirst;
8037 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8038 if (rcStrict != VINF_SUCCESS)
8039 return rcStrict;
8040
8041 RTGCPHYS GCPhysSecond;
8042 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8043 fAccess, &GCPhysSecond);
8044 if (rcStrict != VINF_SUCCESS)
8045 return rcStrict;
8046 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8047
8048 PVM pVM = pVCpu->CTX_SUFF(pVM);
8049#ifdef IEM_VERIFICATION_MODE_FULL
8050 /*
8051 * Detect problematic memory when verifying so we can select
8052 * the right execution engine. (TLB: Redo this.)
8053 */
8054 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8055 {
8056 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8057 if (RT_SUCCESS(rc2))
8058 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8059 if (RT_FAILURE(rc2))
8060 pVCpu->iem.s.fProblematicMemory = true;
8061 }
8062#endif
8063
8064
8065 /*
8066 * Read in the current memory content if it's a read, execute or partial
8067 * write access.
8068 */
8069 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8070 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8071 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8072
8073 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8074 {
8075 if (!pVCpu->iem.s.fBypassHandlers)
8076 {
8077 /*
8078 * Must carefully deal with access handler status codes here,
8079 * makes the code a bit bloated.
8080 */
8081 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8082 if (rcStrict == VINF_SUCCESS)
8083 {
8084 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8085 if (rcStrict == VINF_SUCCESS)
8086 { /*likely */ }
8087 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8088 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8089 else
8090 {
8091                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8092 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8093 return rcStrict;
8094 }
8095 }
8096 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8097 {
8098 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8099 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8100 {
8101 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8102 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8103 }
8104 else
8105 {
8106                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8107                      GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8108 return rcStrict2;
8109 }
8110 }
8111 else
8112 {
8113             Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8114 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8115 return rcStrict;
8116 }
8117 }
8118 else
8119 {
8120 /*
8121              * No informational status codes here, much more straightforward.
8122 */
8123 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8124 if (RT_SUCCESS(rc))
8125 {
8126 Assert(rc == VINF_SUCCESS);
8127 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8128 if (RT_SUCCESS(rc))
8129 Assert(rc == VINF_SUCCESS);
8130 else
8131 {
8132                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8133 return rc;
8134 }
8135 }
8136 else
8137 {
8138             Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8139 return rc;
8140 }
8141 }
8142
8143#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8144 if ( !pVCpu->iem.s.fNoRem
8145 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8146 {
8147 /*
8148 * Record the reads.
8149 */
8150 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8151 if (pEvtRec)
8152 {
8153 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8154 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8155 pEvtRec->u.RamRead.cb = cbFirstPage;
8156 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8157 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8158 }
8159 pEvtRec = iemVerifyAllocRecord(pVCpu);
8160 if (pEvtRec)
8161 {
8162 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8163 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8164 pEvtRec->u.RamRead.cb = cbSecondPage;
8165 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8166 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8167 }
8168 }
8169#endif
8170 }
8171#ifdef VBOX_STRICT
8172 else
8173 memset(pbBuf, 0xcc, cbMem);
8174 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8175 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8176#endif
8177
8178 /*
8179 * Commit the bounce buffer entry.
8180 */
8181 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8182 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8183 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8184 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8185 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8186 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8187 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8188 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8189 pVCpu->iem.s.cActiveMappings++;
8190
8191 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8192 *ppvMem = pbBuf;
8193 return VINF_SUCCESS;
8194}
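
/*
 * Standalone sketch (not used by IEM) of the split performed above: an access
 * that crosses a page boundary is divided into a first chunk covering the rest
 * of the first page and a second chunk starting at the next page.  Assumes
 * 4 KiB pages, matching PAGE_SIZE/PAGE_OFFSET_MASK here; the names are made up.
 */
#if 0 /* illustrative example only */
# include <stdint.h>
# include <stdio.h>

static void ExampleCrossPageSplit(uint64_t uAddr, uint64_t cbMem) /* assumes the access really crosses a page */
{
    uint64_t const cbPage   = 4096;                                 /* PAGE_SIZE assumption */
    uint64_t const cbFirst  = cbPage - (uAddr & (cbPage - 1));      /* bytes left on the first page */
    uint64_t const cbSecond = cbMem - cbFirst;                      /* remainder lands on the next page */
    uint64_t const uSecond  = (uAddr + cbMem - 1) & ~(cbPage - 1);  /* page aligned start of the 2nd chunk */
    printf("1st: %#llx LB %llu  2nd: %#llx LB %llu\n",
           (unsigned long long)uAddr,   (unsigned long long)cbFirst,
           (unsigned long long)uSecond, (unsigned long long)cbSecond);
}

int main(void)
{
    ExampleCrossPageSplit(UINT64_C(0x7ffd), 6); /* bytes 0x7ffd..0x8002: 3 on the first page, 3 on the second */
    return 0;
}
#endif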
8195
8196
8197/**
8198  * iemMemMap worker that deals with iemMemPageMap failures.
8199 */
8200IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8201 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8202{
8203 /*
8204 * Filter out conditions we can handle and the ones which shouldn't happen.
8205 */
8206 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8207 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8208 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8209 {
8210 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8211 return rcMap;
8212 }
8213 pVCpu->iem.s.cPotentialExits++;
8214
8215 /*
8216 * Read in the current memory content if it's a read, execute or partial
8217 * write access.
8218 */
8219 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8220 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8221 {
8222 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8223 memset(pbBuf, 0xff, cbMem);
8224 else
8225 {
8226 int rc;
8227 if (!pVCpu->iem.s.fBypassHandlers)
8228 {
8229 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8230 if (rcStrict == VINF_SUCCESS)
8231 { /* nothing */ }
8232 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8233 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8234 else
8235 {
8236 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8237 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8238 return rcStrict;
8239 }
8240 }
8241 else
8242 {
8243 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8244 if (RT_SUCCESS(rc))
8245 { /* likely */ }
8246 else
8247 {
8248 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8249 GCPhysFirst, rc));
8250 return rc;
8251 }
8252 }
8253 }
8254
8255#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8256 if ( !pVCpu->iem.s.fNoRem
8257 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8258 {
8259 /*
8260 * Record the read.
8261 */
8262 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8263 if (pEvtRec)
8264 {
8265 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8266 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8267 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8268 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8269 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8270 }
8271 }
8272#endif
8273 }
8274#ifdef VBOX_STRICT
8275 else
8276 memset(pbBuf, 0xcc, cbMem);
8279 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8280 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8281#endif
8282
8283 /*
8284 * Commit the bounce buffer entry.
8285 */
8286 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8287 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8288 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8289 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8290 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8291 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8292 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8293 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8294 pVCpu->iem.s.cActiveMappings++;
8295
8296 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8297 *ppvMem = pbBuf;
8298 return VINF_SUCCESS;
8299}
8300
8301
8302
8303/**
8304 * Maps the specified guest memory for the given kind of access.
8305 *
8306 * This may be using bounce buffering of the memory if it's crossing a page
8307 * boundary or if there is an access handler installed for any of it. Because
8308 * of lock prefix guarantees, we're in for some extra clutter when this
8309 * happens.
8310 *
8311 * This may raise a \#GP, \#SS, \#PF or \#AC.
8312 *
8313 * @returns VBox strict status code.
8314 *
8315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8316 * @param ppvMem Where to return the pointer to the mapped
8317 * memory.
8318 * @param cbMem The number of bytes to map. This is usually 1,
8319 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8320 * string operations it can be up to a page.
8321 * @param iSegReg The index of the segment register to use for
8322 * this access. The base and limits are checked.
8323 * Use UINT8_MAX to indicate that no segmentation
8324 * is required (for IDT, GDT and LDT accesses).
8325 * @param GCPtrMem The address of the guest memory.
8326 * @param fAccess How the memory is being accessed. The
8327 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8328 * how to map the memory, while the
8329 * IEM_ACCESS_WHAT_XXX bit is used when raising
8330 * exceptions.
8331 */
8332IEM_STATIC VBOXSTRICTRC
8333iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8334{
8335 /*
8336 * Check the input and figure out which mapping entry to use.
8337 */
8338 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8339     Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8340 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8341
8342 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8343 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8344 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8345 {
8346 iMemMap = iemMemMapFindFree(pVCpu);
8347 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8348 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8349 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8350 pVCpu->iem.s.aMemMappings[2].fAccess),
8351 VERR_IEM_IPE_9);
8352 }
8353
8354 /*
8355 * Map the memory, checking that we can actually access it. If something
8356 * slightly complicated happens, fall back on bounce buffering.
8357 */
8358 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8359 if (rcStrict != VINF_SUCCESS)
8360 return rcStrict;
8361
8362 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8363 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8364
8365 RTGCPHYS GCPhysFirst;
8366 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8367 if (rcStrict != VINF_SUCCESS)
8368 return rcStrict;
8369
8370 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8371 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8372 if (fAccess & IEM_ACCESS_TYPE_READ)
8373 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8374
8375 void *pvMem;
8376 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8377 if (rcStrict != VINF_SUCCESS)
8378 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8379
8380 /*
8381 * Fill in the mapping table entry.
8382 */
8383 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8384 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8385 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8386 pVCpu->iem.s.cActiveMappings++;
8387
8388 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8389 *ppvMem = pvMem;
8390 return VINF_SUCCESS;
8391}
8392
8393
8394/**
8395 * Commits the guest memory if bounce buffered and unmaps it.
8396 *
8397 * @returns Strict VBox status code.
8398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8399 * @param pvMem The mapping.
8400 * @param fAccess The kind of access.
8401 */
8402IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8403{
8404 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8405 AssertReturn(iMemMap >= 0, iMemMap);
8406
8407 /* If it's bounce buffered, we may need to write back the buffer. */
8408 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8409 {
8410 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8411 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8412 }
8413 /* Otherwise unlock it. */
8414 else
8415 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8416
8417 /* Free the entry. */
8418 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8419 Assert(pVCpu->iem.s.cActiveMappings != 0);
8420 pVCpu->iem.s.cActiveMappings--;
8421 return VINF_SUCCESS;
8422}
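
/*
 * Illustrative sketch (hypothetical helper, not part of IEM): the usual
 * map / modify / commit pattern for a read-modify-write data access, built on
 * iemMemMap and iemMemCommitAndUnmap just like the fetch and store helpers
 * further down.  Assumes the IEM_ACCESS_DATA_RW flag combination used
 * elsewhere in IEM for read-modify-write operands.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemExampleMemIncU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint8_t     *pu8Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu8Dst += 1;   /* modify the mapped (possibly bounce buffered) byte */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_RW); /* commits bounce buffers, releases the page lock */
    }
    return rcStrict;
}
#endif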
8423
8424#ifdef IEM_WITH_SETJMP
8425
8426/**
8427 * Maps the specified guest memory for the given kind of access, longjmp on
8428 * error.
8429 *
8430 * This may be using bounce buffering of the memory if it's crossing a page
8431 * boundary or if there is an access handler installed for any of it. Because
8432 * of lock prefix guarantees, we're in for some extra clutter when this
8433 * happens.
8434 *
8435 * This may raise a \#GP, \#SS, \#PF or \#AC.
8436 *
8437 * @returns Pointer to the mapped memory.
8438 *
8439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8440 * @param cbMem The number of bytes to map. This is usually 1,
8441 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8442 * string operations it can be up to a page.
8443 * @param iSegReg The index of the segment register to use for
8444 * this access. The base and limits are checked.
8445 * Use UINT8_MAX to indicate that no segmentation
8446 * is required (for IDT, GDT and LDT accesses).
8447 * @param GCPtrMem The address of the guest memory.
8448 * @param fAccess How the memory is being accessed. The
8449 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8450 * how to map the memory, while the
8451 * IEM_ACCESS_WHAT_XXX bit is used when raising
8452 * exceptions.
8453 */
8454IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8455{
8456 /*
8457 * Check the input and figure out which mapping entry to use.
8458 */
8459 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8460     Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8461 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8462
8463 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8464 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8465 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8466 {
8467 iMemMap = iemMemMapFindFree(pVCpu);
8468 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8469 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8470 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8471 pVCpu->iem.s.aMemMappings[2].fAccess),
8472 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8473 }
8474
8475 /*
8476 * Map the memory, checking that we can actually access it. If something
8477 * slightly complicated happens, fall back on bounce buffering.
8478 */
8479 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8480 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8481 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8482
8483 /* Crossing a page boundary? */
8484 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8485 { /* No (likely). */ }
8486 else
8487 {
8488 void *pvMem;
8489 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8490 if (rcStrict == VINF_SUCCESS)
8491 return pvMem;
8492 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8493 }
8494
8495 RTGCPHYS GCPhysFirst;
8496 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8497 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8498 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8499
8500 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8501 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8502 if (fAccess & IEM_ACCESS_TYPE_READ)
8503 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8504
8505 void *pvMem;
8506 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8507 if (rcStrict == VINF_SUCCESS)
8508 { /* likely */ }
8509 else
8510 {
8511 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8512 if (rcStrict == VINF_SUCCESS)
8513 return pvMem;
8514 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8515 }
8516
8517 /*
8518 * Fill in the mapping table entry.
8519 */
8520 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8521 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8522 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8523 pVCpu->iem.s.cActiveMappings++;
8524
8525 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8526 return pvMem;
8527}
8528
8529
8530/**
8531 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8532 *
8533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8534 * @param pvMem The mapping.
8535 * @param fAccess The kind of access.
8536 */
8537IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8538{
8539 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8540 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8541
8542 /* If it's bounce buffered, we may need to write back the buffer. */
8543 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8544 {
8545 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8546 {
8547 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8548 if (rcStrict == VINF_SUCCESS)
8549 return;
8550 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8551 }
8552 }
8553 /* Otherwise unlock it. */
8554 else
8555 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8556
8557 /* Free the entry. */
8558 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8559 Assert(pVCpu->iem.s.cActiveMappings != 0);
8560 pVCpu->iem.s.cActiveMappings--;
8561}
8562
8563#endif
8564
8565#ifndef IN_RING3
8566/**
8567 * Commits the guest memory if bounce buffered and unmaps it; if any bounce buffer
8568 * part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM and the pending-write flags).
8569 *
8570 * Allows the instruction to be completed and retired, while the IEM user will
8571 * return to ring-3 immediately afterwards and do the postponed writes there.
8572 *
8573 * @returns VBox status code (no strict statuses). Caller must check
8574 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8576 * @param pvMem The mapping.
8577 * @param fAccess The kind of access.
8578 */
8579IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8580{
8581 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8582 AssertReturn(iMemMap >= 0, iMemMap);
8583
8584 /* If it's bounce buffered, we may need to write back the buffer. */
8585 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8586 {
8587 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8588 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8589 }
8590 /* Otherwise unlock it. */
8591 else
8592 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8593
8594 /* Free the entry. */
8595 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8596 Assert(pVCpu->iem.s.cActiveMappings != 0);
8597 pVCpu->iem.s.cActiveMappings--;
8598 return VINF_SUCCESS;
8599}
8600#endif
8601
8602
8603/**
8604 * Rolls back mappings, releasing page locks and such.
8605 *
8606 * The caller shall only call this after checking cActiveMappings.
8607 *
8608 * @returns Strict VBox status code to pass up.
8610 */
8611IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8612{
8613 Assert(pVCpu->iem.s.cActiveMappings > 0);
8614
8615 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8616 while (iMemMap-- > 0)
8617 {
8618 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8619 if (fAccess != IEM_ACCESS_INVALID)
8620 {
8621 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8622 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8623 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8624 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8625 Assert(pVCpu->iem.s.cActiveMappings > 0);
8626 pVCpu->iem.s.cActiveMappings--;
8627 }
8628 }
8629}
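
/*
 * Sketch of the intended caller pattern (hypothetical and simplified): after a
 * failed instruction the execution loop checks cActiveMappings and lets
 * iemMemRollback release whatever mappings the instruction left behind.
 * iemExampleExecuteOneInstruction is a made up stand-in for the real driver.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemExampleExecuteAndCleanUp(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = iemExampleExecuteOneInstruction(pVCpu); /* hypothetical */
    if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu); /* releases page locks and marks the entries IEM_ACCESS_INVALID */
    return rcStrict;
}
#endif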
8630
8631
8632/**
8633 * Fetches a data byte.
8634 *
8635 * @returns Strict VBox status code.
8636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8637 * @param pu8Dst Where to return the byte.
8638 * @param iSegReg The index of the segment register to use for
8639 * this access. The base and limits are checked.
8640 * @param GCPtrMem The address of the guest memory.
8641 */
8642IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8643{
8644 /* The lazy approach for now... */
8645 uint8_t const *pu8Src;
8646 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8647 if (rc == VINF_SUCCESS)
8648 {
8649 *pu8Dst = *pu8Src;
8650 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8651 }
8652 return rc;
8653}
8654
8655
8656#ifdef IEM_WITH_SETJMP
8657/**
8658 * Fetches a data byte, longjmp on error.
8659 *
8660 * @returns The byte.
8661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8662 * @param iSegReg The index of the segment register to use for
8663 * this access. The base and limits are checked.
8664 * @param GCPtrMem The address of the guest memory.
8665 */
8666DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8667{
8668 /* The lazy approach for now... */
8669 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8670 uint8_t const bRet = *pu8Src;
8671 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8672 return bRet;
8673}
8674#endif /* IEM_WITH_SETJMP */
8675
8676
8677/**
8678 * Fetches a data word.
8679 *
8680 * @returns Strict VBox status code.
8681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8682 * @param pu16Dst Where to return the word.
8683 * @param iSegReg The index of the segment register to use for
8684 * this access. The base and limits are checked.
8685 * @param GCPtrMem The address of the guest memory.
8686 */
8687IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8688{
8689 /* The lazy approach for now... */
8690 uint16_t const *pu16Src;
8691 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8692 if (rc == VINF_SUCCESS)
8693 {
8694 *pu16Dst = *pu16Src;
8695 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8696 }
8697 return rc;
8698}
8699
8700
8701#ifdef IEM_WITH_SETJMP
8702/**
8703 * Fetches a data word, longjmp on error.
8704 *
8705 * @returns The word
8706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8707 * @param iSegReg The index of the segment register to use for
8708 * this access. The base and limits are checked.
8709 * @param GCPtrMem The address of the guest memory.
8710 */
8711DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8712{
8713 /* The lazy approach for now... */
8714 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8715 uint16_t const u16Ret = *pu16Src;
8716 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8717 return u16Ret;
8718}
8719#endif
8720
8721
8722/**
8723 * Fetches a data dword.
8724 *
8725 * @returns Strict VBox status code.
8726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8727 * @param pu32Dst Where to return the dword.
8728 * @param iSegReg The index of the segment register to use for
8729 * this access. The base and limits are checked.
8730 * @param GCPtrMem The address of the guest memory.
8731 */
8732IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8733{
8734 /* The lazy approach for now... */
8735 uint32_t const *pu32Src;
8736 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8737 if (rc == VINF_SUCCESS)
8738 {
8739 *pu32Dst = *pu32Src;
8740 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8741 }
8742 return rc;
8743}
8744
8745
8746#ifdef IEM_WITH_SETJMP
8747
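/**
 * Applies the segment base and limit checks to a read access, longjmp on error.
 *
 * @returns The linear address to use for the access.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg             The index of the segment register to use for
 *                              this access.
 * @param   cbMem               The number of bytes to access.
 * @param   GCPtrMem            The segment relative address of the guest memory.
 */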
8748IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8749{
8750 Assert(cbMem >= 1);
8751 Assert(iSegReg < X86_SREG_COUNT);
8752
8753 /*
8754 * 64-bit mode is simpler.
8755 */
8756 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8757 {
8758 if (iSegReg >= X86_SREG_FS)
8759 {
8760 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8761 GCPtrMem += pSel->u64Base;
8762 }
8763
8764 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8765 return GCPtrMem;
8766 }
8767 /*
8768 * 16-bit and 32-bit segmentation.
8769 */
8770 else
8771 {
8772 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8773 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8774 == X86DESCATTR_P /* data, expand up */
8775 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8776 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8777 {
8778 /* expand up */
8779             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte accessed */
8780             if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
8781                           && GCPtrLast32 >= (uint32_t)GCPtrMem))
8782 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8783 }
8784 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8785 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8786 {
8787 /* expand down */
8788 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8789 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8790 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8791 && GCPtrLast32 > (uint32_t)GCPtrMem))
8792 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8793 }
8794 else
8795 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8796 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8797 }
8798 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8799}
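
/*
 * Standalone sketch (not used by IEM) of the limit arithmetic above, assuming
 * a 32-bit (D=1) data segment: expand-up segments allow offsets [0, limit],
 * expand-down segments allow offsets (limit, 0xffffffff].
 */
#if 0 /* illustrative example only */
# include <stdint.h>
# include <stdbool.h>

static bool ExampleSegLimitCheckOk(bool fExpandDown, uint32_t uLimit, uint32_t off, uint32_t cb)
{
    uint32_t const offLast = off + cb - 1;      /* last byte accessed */
    if (offLast < off)                          /* 32-bit wrap-around always faults */
        return false;
    if (!fExpandDown)
        return offLast <= uLimit;               /* expand up: stay at or below the limit */
    return off > uLimit;                        /* expand down: start strictly above the limit */
}
#endif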
8800
8801
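/**
 * Applies the segment base and limit checks to a write access, longjmp on error.
 *
 * @returns The linear address to use for the access.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg             The index of the segment register to use for
 *                              this access.
 * @param   cbMem               The number of bytes to access.
 * @param   GCPtrMem            The segment relative address of the guest memory.
 */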
8802IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8803{
8804 Assert(cbMem >= 1);
8805 Assert(iSegReg < X86_SREG_COUNT);
8806
8807 /*
8808 * 64-bit mode is simpler.
8809 */
8810 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8811 {
8812 if (iSegReg >= X86_SREG_FS)
8813 {
8814 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8815 GCPtrMem += pSel->u64Base;
8816 }
8817
8818 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8819 return GCPtrMem;
8820 }
8821 /*
8822 * 16-bit and 32-bit segmentation.
8823 */
8824 else
8825 {
8826 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8827 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8828 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8829 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8830 {
8831 /* expand up */
8832             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte accessed */
8833             if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
8834                           && GCPtrLast32 >= (uint32_t)GCPtrMem))
8835 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8836 }
8837         else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8838 {
8839 /* expand down */
8840 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8841 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8842 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8843 && GCPtrLast32 > (uint32_t)GCPtrMem))
8844 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8845 }
8846 else
8847 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8848 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8849 }
8850 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8851}
8852
8853
8854/**
8855 * Fetches a data dword, longjmp on error, fallback/safe version.
8856 *
8857 * @returns The dword
8858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8859 * @param iSegReg The index of the segment register to use for
8860 * this access. The base and limits are checked.
8861 * @param GCPtrMem The address of the guest memory.
8862 */
8863IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8864{
8865 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8866 uint32_t const u32Ret = *pu32Src;
8867 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8868 return u32Ret;
8869}
8870
8871
8872/**
8873 * Fetches a data dword, longjmp on error.
8874 *
8875 * @returns The dword
8876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8877 * @param iSegReg The index of the segment register to use for
8878 * this access. The base and limits are checked.
8879 * @param GCPtrMem The address of the guest memory.
8880 */
8881DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8882{
8883# ifdef IEM_WITH_DATA_TLB
8884 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8885 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8886 {
8887 /// @todo more later.
8888 }
8889
8890 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8891# else
8892 /* The lazy approach. */
8893 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8894 uint32_t const u32Ret = *pu32Src;
8895 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8896 return u32Ret;
8897# endif
8898}
8899#endif
8900
8901
8902#ifdef SOME_UNUSED_FUNCTION
8903/**
8904 * Fetches a data dword and sign extends it to a qword.
8905 *
8906 * @returns Strict VBox status code.
8907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8908 * @param pu64Dst Where to return the sign extended value.
8909 * @param iSegReg The index of the segment register to use for
8910 * this access. The base and limits are checked.
8911 * @param GCPtrMem The address of the guest memory.
8912 */
8913IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8914{
8915 /* The lazy approach for now... */
8916 int32_t const *pi32Src;
8917 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8918 if (rc == VINF_SUCCESS)
8919 {
8920 *pu64Dst = *pi32Src;
8921 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8922 }
8923#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8924 else
8925 *pu64Dst = 0;
8926#endif
8927 return rc;
8928}
8929#endif
8930
8931
8932/**
8933 * Fetches a data qword.
8934 *
8935 * @returns Strict VBox status code.
8936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8937 * @param pu64Dst Where to return the qword.
8938 * @param iSegReg The index of the segment register to use for
8939 * this access. The base and limits are checked.
8940 * @param GCPtrMem The address of the guest memory.
8941 */
8942IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8943{
8944 /* The lazy approach for now... */
8945 uint64_t const *pu64Src;
8946 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8947 if (rc == VINF_SUCCESS)
8948 {
8949 *pu64Dst = *pu64Src;
8950 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8951 }
8952 return rc;
8953}
8954
8955
8956#ifdef IEM_WITH_SETJMP
8957/**
8958 * Fetches a data qword, longjmp on error.
8959 *
8960 * @returns The qword.
8961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8962 * @param iSegReg The index of the segment register to use for
8963 * this access. The base and limits are checked.
8964 * @param GCPtrMem The address of the guest memory.
8965 */
8966DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8967{
8968 /* The lazy approach for now... */
8969 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8970 uint64_t const u64Ret = *pu64Src;
8971 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8972 return u64Ret;
8973}
8974#endif
8975
8976
8977/**
8978 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8979 *
8980 * @returns Strict VBox status code.
8981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8982 * @param pu64Dst Where to return the qword.
8983 * @param iSegReg The index of the segment register to use for
8984 * this access. The base and limits are checked.
8985 * @param GCPtrMem The address of the guest memory.
8986 */
8987IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8988{
8989 /* The lazy approach for now... */
8990 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8991 if (RT_UNLIKELY(GCPtrMem & 15))
8992 return iemRaiseGeneralProtectionFault0(pVCpu);
8993
8994 uint64_t const *pu64Src;
8995 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8996 if (rc == VINF_SUCCESS)
8997 {
8998 *pu64Dst = *pu64Src;
8999 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9000 }
9001 return rc;
9002}
9003
9004
9005#ifdef IEM_WITH_SETJMP
9006/**
9007 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9008 *
9009 * @returns The qword.
9010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9011 * @param iSegReg The index of the segment register to use for
9012 * this access. The base and limits are checked.
9013 * @param GCPtrMem The address of the guest memory.
9014 */
9015DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9016{
9017 /* The lazy approach for now... */
9018 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9019 if (RT_LIKELY(!(GCPtrMem & 15)))
9020 {
9021 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9022 uint64_t const u64Ret = *pu64Src;
9023 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9024 return u64Ret;
9025 }
9026
9027 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9028 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9029}
9030#endif
9031
9032
9033/**
9034 * Fetches a data tword.
9035 *
9036 * @returns Strict VBox status code.
9037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9038 * @param pr80Dst Where to return the tword.
9039 * @param iSegReg The index of the segment register to use for
9040 * this access. The base and limits are checked.
9041 * @param GCPtrMem The address of the guest memory.
9042 */
9043IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9044{
9045 /* The lazy approach for now... */
9046 PCRTFLOAT80U pr80Src;
9047 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9048 if (rc == VINF_SUCCESS)
9049 {
9050 *pr80Dst = *pr80Src;
9051 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9052 }
9053 return rc;
9054}
9055
9056
9057#ifdef IEM_WITH_SETJMP
9058/**
9059 * Fetches a data tword, longjmp on error.
9060 *
9061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9062 * @param pr80Dst Where to return the tword.
9063 * @param iSegReg The index of the segment register to use for
9064 * this access. The base and limits are checked.
9065 * @param GCPtrMem The address of the guest memory.
9066 */
9067DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9068{
9069 /* The lazy approach for now... */
9070 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9071 *pr80Dst = *pr80Src;
9072 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9073}
9074#endif
9075
9076
9077/**
9078 * Fetches a data dqword (double qword), generally SSE related.
9079 *
9080 * @returns Strict VBox status code.
9081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9082 * @param   pu128Dst            Where to return the dqword.
9083 * @param iSegReg The index of the segment register to use for
9084 * this access. The base and limits are checked.
9085 * @param GCPtrMem The address of the guest memory.
9086 */
9087IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9088{
9089 /* The lazy approach for now... */
9090 uint128_t const *pu128Src;
9091 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9092 if (rc == VINF_SUCCESS)
9093 {
9094 *pu128Dst = *pu128Src;
9095 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9096 }
9097 return rc;
9098}
9099
9100
9101#ifdef IEM_WITH_SETJMP
9102/**
9103 * Fetches a data dqword (double qword), generally SSE related.
9104 *
9105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9106 * @param   pu128Dst            Where to return the dqword.
9107 * @param iSegReg The index of the segment register to use for
9108 * this access. The base and limits are checked.
9109 * @param GCPtrMem The address of the guest memory.
9110 */
9111IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9112{
9113 /* The lazy approach for now... */
9114 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9115 *pu128Dst = *pu128Src;
9116 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9117}
9118#endif
9119
9120
9121/**
9122 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9123 * related.
9124 *
9125 * Raises \#GP(0) if not aligned.
9126 *
9127 * @returns Strict VBox status code.
9128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9129 * @param   pu128Dst            Where to return the dqword.
9130 * @param iSegReg The index of the segment register to use for
9131 * this access. The base and limits are checked.
9132 * @param GCPtrMem The address of the guest memory.
9133 */
9134IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9135{
9136 /* The lazy approach for now... */
9137 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9138 if ( (GCPtrMem & 15)
9139 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9140 return iemRaiseGeneralProtectionFault0(pVCpu);
9141
9142 uint128_t const *pu128Src;
9143 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9144 if (rc == VINF_SUCCESS)
9145 {
9146 *pu128Dst = *pu128Src;
9147 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9148 }
9149 return rc;
9150}
9151
9152
9153#ifdef IEM_WITH_SETJMP
9154/**
9155 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9156 * related, longjmp on error.
9157 *
9158 * Raises \#GP(0) if not aligned.
9159 *
9160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9161 * @param   pu128Dst            Where to return the dqword.
9162 * @param iSegReg The index of the segment register to use for
9163 * this access. The base and limits are checked.
9164 * @param GCPtrMem The address of the guest memory.
9165 */
9166DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9167{
9168 /* The lazy approach for now... */
9169 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9170 if ( (GCPtrMem & 15) == 0
9171 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9172 {
9173 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9174 IEM_ACCESS_DATA_R);
9175 *pu128Dst = *pu128Src;
9176 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9177 return;
9178 }
9179
9180 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9181 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9182}
9183#endif
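
/*
 * Minimal sketch (not used by IEM) of the alignment rule enforced by the
 * aligned SSE fetch and store helpers: a 16-byte access must be 16-byte
 * aligned unless the guest is in AMD's misaligned SSE mode.  The MXCSR.MM
 * bit position below is an assumption of this example.
 */
#if 0 /* illustrative example only */
# include <stdint.h>
# include <stdbool.h>

static bool ExampleSseAccessFaults(uint64_t GCPtrMem, uint32_t fMxCsr)
{
    bool const fMisalignedOk = (fMxCsr & UINT32_C(0x20000)) != 0;   /* MXCSR.MM, bit 17 (assumption) */
    return (GCPtrMem & 15) != 0 && !fMisalignedOk;                  /* true -> the helpers raise #GP(0) */
}
#endif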
9184
9185
9186
9187/**
9188 * Fetches a descriptor register (lgdt, lidt).
9189 *
9190 * @returns Strict VBox status code.
9191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9192 * @param pcbLimit Where to return the limit.
9193 * @param pGCPtrBase Where to return the base.
9194 * @param iSegReg The index of the segment register to use for
9195 * this access. The base and limits are checked.
9196 * @param GCPtrMem The address of the guest memory.
9197 * @param enmOpSize The effective operand size.
9198 */
9199IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9200 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9201{
9202 /*
9203 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9204 * little special:
9205 * - The two reads are done separately.
9206 *   - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9207 * - We suspect the 386 to actually commit the limit before the base in
9208 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9209 *     don't try to emulate this eccentric behavior, because it's not well
9210 * enough understood and rather hard to trigger.
9211 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9212 */
9213 VBOXSTRICTRC rcStrict;
9214 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9215 {
9216 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9217 if (rcStrict == VINF_SUCCESS)
9218 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9219 }
9220 else
9221 {
9222         uint32_t uTmp = 0; /* (Visual C++ may otherwise warn that it is used uninitialized.) */
9223 if (enmOpSize == IEMMODE_32BIT)
9224 {
9225 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9226 {
9227 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9228 if (rcStrict == VINF_SUCCESS)
9229 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9230 }
9231 else
9232 {
9233 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9234 if (rcStrict == VINF_SUCCESS)
9235 {
9236 *pcbLimit = (uint16_t)uTmp;
9237 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9238 }
9239 }
9240 if (rcStrict == VINF_SUCCESS)
9241 *pGCPtrBase = uTmp;
9242 }
9243 else
9244 {
9245 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9246 if (rcStrict == VINF_SUCCESS)
9247 {
9248 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9249 if (rcStrict == VINF_SUCCESS)
9250 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9251 }
9252 }
9253 }
9254 return rcStrict;
9255}
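
/*
 * For reference, a sketch (not used by IEM) of the memory operand the code
 * above decodes for LGDT/LIDT: a 16-bit limit followed immediately by the
 * base, of which 3 bytes are used with a 16-bit operand size, 4 bytes with a
 * 32-bit operand size and 8 bytes in 64-bit mode.  The struct is illustrative.
 */
#if 0 /* illustrative example only */
# pragma pack(1)
typedef struct EXAMPLEXDTROPERAND
{
    uint16_t cbLimit;           /* bytes 0..1: the table limit */
    uint64_t uBase;             /* bytes 2..9: the base; only the low 3 or 4 bytes exist outside 64-bit mode */
} EXAMPLEXDTROPERAND;
# pragma pack()
#endif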
9256
9257
9258
9259/**
9260 * Stores a data byte.
9261 *
9262 * @returns Strict VBox status code.
9263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9264 * @param iSegReg The index of the segment register to use for
9265 * this access. The base and limits are checked.
9266 * @param GCPtrMem The address of the guest memory.
9267 * @param u8Value The value to store.
9268 */
9269IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9270{
9271 /* The lazy approach for now... */
9272 uint8_t *pu8Dst;
9273 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9274 if (rc == VINF_SUCCESS)
9275 {
9276 *pu8Dst = u8Value;
9277 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9278 }
9279 return rc;
9280}
9281
9282
9283#ifdef IEM_WITH_SETJMP
9284/**
9285 * Stores a data byte, longjmp on error.
9286 *
9287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9288 * @param iSegReg The index of the segment register to use for
9289 * this access. The base and limits are checked.
9290 * @param GCPtrMem The address of the guest memory.
9291 * @param u8Value The value to store.
9292 */
9293IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9294{
9295 /* The lazy approach for now... */
9296 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9297 *pu8Dst = u8Value;
9298 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9299}
9300#endif
9301
9302
9303/**
9304 * Stores a data word.
9305 *
9306 * @returns Strict VBox status code.
9307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9308 * @param iSegReg The index of the segment register to use for
9309 * this access. The base and limits are checked.
9310 * @param GCPtrMem The address of the guest memory.
9311 * @param u16Value The value to store.
9312 */
9313IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9314{
9315 /* The lazy approach for now... */
9316 uint16_t *pu16Dst;
9317 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9318 if (rc == VINF_SUCCESS)
9319 {
9320 *pu16Dst = u16Value;
9321 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9322 }
9323 return rc;
9324}
9325
9326
9327#ifdef IEM_WITH_SETJMP
9328/**
9329 * Stores a data word, longjmp on error.
9330 *
9331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9332 * @param iSegReg The index of the segment register to use for
9333 * this access. The base and limits are checked.
9334 * @param GCPtrMem The address of the guest memory.
9335 * @param u16Value The value to store.
9336 */
9337IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9338{
9339 /* The lazy approach for now... */
9340 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9341 *pu16Dst = u16Value;
9342 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9343}
9344#endif
9345
9346
9347/**
9348 * Stores a data dword.
9349 *
9350 * @returns Strict VBox status code.
9351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9352 * @param iSegReg The index of the segment register to use for
9353 * this access. The base and limits are checked.
9354 * @param GCPtrMem The address of the guest memory.
9355 * @param u32Value The value to store.
9356 */
9357IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9358{
9359 /* The lazy approach for now... */
9360 uint32_t *pu32Dst;
9361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9362 if (rc == VINF_SUCCESS)
9363 {
9364 *pu32Dst = u32Value;
9365 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9366 }
9367 return rc;
9368}
9369
9370
9371#ifdef IEM_WITH_SETJMP
9372/**
9373 * Stores a data dword, longjmp on error.
9374 *
9376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9377 * @param iSegReg The index of the segment register to use for
9378 * this access. The base and limits are checked.
9379 * @param GCPtrMem The address of the guest memory.
9380 * @param u32Value The value to store.
9381 */
9382IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9383{
9384 /* The lazy approach for now... */
9385 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9386 *pu32Dst = u32Value;
9387 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9388}
9389#endif
9390
9391
9392/**
9393 * Stores a data qword.
9394 *
9395 * @returns Strict VBox status code.
9396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9397 * @param iSegReg The index of the segment register to use for
9398 * this access. The base and limits are checked.
9399 * @param GCPtrMem The address of the guest memory.
9400 * @param u64Value The value to store.
9401 */
9402IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9403{
9404 /* The lazy approach for now... */
9405 uint64_t *pu64Dst;
9406 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9407 if (rc == VINF_SUCCESS)
9408 {
9409 *pu64Dst = u64Value;
9410 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9411 }
9412 return rc;
9413}
9414
9415
9416#ifdef IEM_WITH_SETJMP
9417/**
9418 * Stores a data qword, longjmp on error.
9419 *
9420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9421 * @param iSegReg The index of the segment register to use for
9422 * this access. The base and limits are checked.
9423 * @param GCPtrMem The address of the guest memory.
9424 * @param u64Value The value to store.
9425 */
9426IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9427{
9428 /* The lazy approach for now... */
9429 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9430 *pu64Dst = u64Value;
9431 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9432}
9433#endif
9434
9435
9436/**
9437 * Stores a data dqword.
9438 *
9439 * @returns Strict VBox status code.
9440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9441 * @param iSegReg The index of the segment register to use for
9442 * this access. The base and limits are checked.
9443 * @param GCPtrMem The address of the guest memory.
9444 * @param u128Value The value to store.
9445 */
9446IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9447{
9448 /* The lazy approach for now... */
9449 uint128_t *pu128Dst;
9450 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9451 if (rc == VINF_SUCCESS)
9452 {
9453 *pu128Dst = u128Value;
9454 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9455 }
9456 return rc;
9457}
9458
9459
9460#ifdef IEM_WITH_SETJMP
9461/**
9462 * Stores a data dqword, longjmp on error.
9463 *
9464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9465 * @param iSegReg The index of the segment register to use for
9466 * this access. The base and limits are checked.
9467 * @param GCPtrMem The address of the guest memory.
9468 * @param u128Value The value to store.
9469 */
9470IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9471{
9472 /* The lazy approach for now... */
9473 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9474 *pu128Dst = u128Value;
9475 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9476}
9477#endif
9478
9479
9480/**
9481 * Stores a data dqword, SSE aligned.
9482 *
9483 * @returns Strict VBox status code.
9484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9485 * @param iSegReg The index of the segment register to use for
9486 * this access. The base and limits are checked.
9487 * @param GCPtrMem The address of the guest memory.
9488 * @param u128Value The value to store.
9489 */
9490IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9491{
9492 /* The lazy approach for now... */
9493 if ( (GCPtrMem & 15)
9494 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9495 return iemRaiseGeneralProtectionFault0(pVCpu);
9496
9497 uint128_t *pu128Dst;
9498 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9499 if (rc == VINF_SUCCESS)
9500 {
9501 *pu128Dst = u128Value;
9502 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9503 }
9504 return rc;
9505}
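/* Illustrative sketch (not from the original source): the alignment check above
   means a 16-byte store to an address that is not 16-byte aligned raises #GP(0)
   unless MXCSR.MM is set. The address is made up; pVCpu and u128Value are
   assumed to be in scope. */
#if 0
    /* 0x1008 is not 16-byte aligned, so this yields the #GP(0) status unless MXCSR.MM=1. */
    VBOXSTRICTRC rcStrict = iemMemStoreDataU128AlignedSse(pVCpu, X86_SREG_DS, 0x1008, u128Value);
#endif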
9506
9507
9508#ifdef IEM_WITH_SETJMP
9509/**
 9510 * Stores a data dqword, SSE aligned, longjmp on error.
 9511 *
9513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9514 * @param iSegReg The index of the segment register to use for
9515 * this access. The base and limits are checked.
9516 * @param GCPtrMem The address of the guest memory.
9517 * @param u128Value The value to store.
9518 */
9519DECL_NO_INLINE(IEM_STATIC, void)
9520iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9521{
9522 /* The lazy approach for now... */
9523 if ( (GCPtrMem & 15) == 0
9524 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9525 {
9526 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9527 *pu128Dst = u128Value;
9528 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9529 return;
9530 }
9531
9532 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9533 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9534}
9535#endif
9536
9537
9538/**
9539 * Stores a descriptor register (sgdt, sidt).
9540 *
9541 * @returns Strict VBox status code.
9542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9543 * @param cbLimit The limit.
9544 * @param GCPtrBase The base address.
9545 * @param iSegReg The index of the segment register to use for
9546 * this access. The base and limits are checked.
9547 * @param GCPtrMem The address of the guest memory.
9548 */
9549IEM_STATIC VBOXSTRICTRC
9550iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9551{
9552 /*
 9553 * The SIDT and SGDT instructions actually store the data using two
 9554 * independent writes. The instructions do not respond to opsize prefixes.
9555 */
9556 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9557 if (rcStrict == VINF_SUCCESS)
9558 {
9559 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9560 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9561 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9562 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9563 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9564 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9565 else
9566 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9567 }
9568 return rcStrict;
9569}
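/* Illustrative sketch (not from the original source): what the two independent
   writes look like for a hypothetical 32-bit guest executing SGDT [ds:0x1000]
   with GDTR.limit=0x0027 and GDTR.base=0x00123000. All values are made up;
   pVCpu is assumed to be in scope. */
#if 0
    VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, /*cbLimit=*/0x0027, /*GCPtrBase=*/0x00123000,
                                                X86_SREG_DS, /*GCPtrMem=*/0x1000);
    /* This performs a 16-bit store of 0x0027 at ds:0x1000 followed by a 32-bit
       store of 0x00123000 at ds:0x1002. (In 16-bit mode on a 286-level target
       the top byte of the base would be forced to 0xff instead.) */
#endif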
9570
9571
9572/**
9573 * Pushes a word onto the stack.
9574 *
9575 * @returns Strict VBox status code.
9576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9577 * @param u16Value The value to push.
9578 */
9579IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9580{
9581 /* Increment the stack pointer. */
9582 uint64_t uNewRsp;
9583 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9584 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9585
9586 /* Write the word the lazy way. */
9587 uint16_t *pu16Dst;
9588 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9589 if (rc == VINF_SUCCESS)
9590 {
9591 *pu16Dst = u16Value;
9592 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9593 }
9594
 9595 /* Commit the new RSP value unless an access handler made trouble. */
9596 if (rc == VINF_SUCCESS)
9597 pCtx->rsp = uNewRsp;
9598
9599 return rc;
9600}
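/* Illustrative sketch (not from the original source): the ordering above matters;
   a hypothetical 16-bit PUSH caller only sees SP move once the mapped stack slot
   has been written and successfully committed. pVCpu is assumed to be in scope. */
#if 0
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, IEM_GET_CTX(pVCpu)->ax);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;    /* SP is unchanged if mapping or committing failed. */
#endif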
9601
9602
9603/**
9604 * Pushes a dword onto the stack.
9605 *
9606 * @returns Strict VBox status code.
9607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9608 * @param u32Value The value to push.
9609 */
9610IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9611{
9612 /* Increment the stack pointer. */
9613 uint64_t uNewRsp;
9614 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9615 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9616
9617 /* Write the dword the lazy way. */
9618 uint32_t *pu32Dst;
9619 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9620 if (rc == VINF_SUCCESS)
9621 {
9622 *pu32Dst = u32Value;
9623 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9624 }
9625
 9626 /* Commit the new RSP value unless an access handler made trouble. */
9627 if (rc == VINF_SUCCESS)
9628 pCtx->rsp = uNewRsp;
9629
9630 return rc;
9631}
9632
9633
9634/**
9635 * Pushes a dword segment register value onto the stack.
9636 *
9637 * @returns Strict VBox status code.
9638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9639 * @param u32Value The value to push.
9640 */
9641IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9642{
9643 /* Increment the stack pointer. */
9644 uint64_t uNewRsp;
9645 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9646 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9647
9648 VBOXSTRICTRC rc;
9649 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9650 {
9651 /* The recompiler writes a full dword. */
9652 uint32_t *pu32Dst;
9653 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9654 if (rc == VINF_SUCCESS)
9655 {
9656 *pu32Dst = u32Value;
9657 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9658 }
9659 }
9660 else
9661 {
 9662 /* The Intel docs talk about zero extending the selector register
 9663 value. My actual Intel CPU here might be zero extending the value,
 9664 but it still only writes the lower word... */
 9665 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
 9666 * happens when crossing a page boundary: is the high word checked
 9667 * for write accessibility or not? Probably it is. What about segment limits?
 9668 * It appears this behavior is also shared with trap error codes.
 9669 *
 9670 * Docs indicate the behavior changed maybe in the Pentium or Pentium Pro. Check
 9671 * ancient hardware to find out when it actually did change. */
9672 uint16_t *pu16Dst;
9673 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9674 if (rc == VINF_SUCCESS)
9675 {
9676 *pu16Dst = (uint16_t)u32Value;
9677 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9678 }
9679 }
9680
 9681 /* Commit the new RSP value unless an access handler made trouble. */
9682 if (rc == VINF_SUCCESS)
9683 pCtx->rsp = uNewRsp;
9684
9685 return rc;
9686}
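/* Illustrative sketch (not from the original source): a 32-bit "push fs" style
   caller would go through the helper above; ESP still drops by 4, but outside
   REM verification mode only the low word of the slot is written, matching the
   hardware behaviour described in the comment. pVCpu is assumed to be in scope. */
#if 0
    VBOXSTRICTRC rcStrict = iemMemStackPushU32SReg(pVCpu, IEM_GET_CTX(pVCpu)->fs.Sel);
#endif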
9687
9688
9689/**
9690 * Pushes a qword onto the stack.
9691 *
9692 * @returns Strict VBox status code.
9693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9694 * @param u64Value The value to push.
9695 */
9696IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9697{
9698 /* Increment the stack pointer. */
9699 uint64_t uNewRsp;
9700 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9701 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9702
 9703 /* Write the qword the lazy way. */
9704 uint64_t *pu64Dst;
9705 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9706 if (rc == VINF_SUCCESS)
9707 {
9708 *pu64Dst = u64Value;
9709 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9710 }
9711
 9712 /* Commit the new RSP value unless an access handler made trouble. */
9713 if (rc == VINF_SUCCESS)
9714 pCtx->rsp = uNewRsp;
9715
9716 return rc;
9717}
9718
9719
9720/**
9721 * Pops a word from the stack.
9722 *
9723 * @returns Strict VBox status code.
9724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9725 * @param pu16Value Where to store the popped value.
9726 */
9727IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9728{
9729 /* Increment the stack pointer. */
9730 uint64_t uNewRsp;
9731 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9732 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9733
 9734 /* Read the word the lazy way. */
9735 uint16_t const *pu16Src;
9736 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9737 if (rc == VINF_SUCCESS)
9738 {
9739 *pu16Value = *pu16Src;
9740 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9741
9742 /* Commit the new RSP value. */
9743 if (rc == VINF_SUCCESS)
9744 pCtx->rsp = uNewRsp;
9745 }
9746
9747 return rc;
9748}
9749
9750
9751/**
9752 * Pops a dword from the stack.
9753 *
9754 * @returns Strict VBox status code.
9755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9756 * @param pu32Value Where to store the popped value.
9757 */
9758IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9759{
9760 /* Increment the stack pointer. */
9761 uint64_t uNewRsp;
9762 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9763 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9764
 9765 /* Read the dword the lazy way. */
9766 uint32_t const *pu32Src;
9767 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9768 if (rc == VINF_SUCCESS)
9769 {
9770 *pu32Value = *pu32Src;
9771 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9772
9773 /* Commit the new RSP value. */
9774 if (rc == VINF_SUCCESS)
9775 pCtx->rsp = uNewRsp;
9776 }
9777
9778 return rc;
9779}
9780
9781
9782/**
9783 * Pops a qword from the stack.
9784 *
9785 * @returns Strict VBox status code.
9786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9787 * @param pu64Value Where to store the popped value.
9788 */
9789IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9790{
9791 /* Increment the stack pointer. */
9792 uint64_t uNewRsp;
9793 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9794 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9795
 9796 /* Read the qword the lazy way. */
9797 uint64_t const *pu64Src;
9798 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9799 if (rc == VINF_SUCCESS)
9800 {
9801 *pu64Value = *pu64Src;
9802 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9803
9804 /* Commit the new RSP value. */
9805 if (rc == VINF_SUCCESS)
9806 pCtx->rsp = uNewRsp;
9807 }
9808
9809 return rc;
9810}
9811
9812
9813/**
9814 * Pushes a word onto the stack, using a temporary stack pointer.
9815 *
9816 * @returns Strict VBox status code.
9817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9818 * @param u16Value The value to push.
9819 * @param pTmpRsp Pointer to the temporary stack pointer.
9820 */
9821IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9822{
9823 /* Increment the stack pointer. */
9824 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9825 RTUINT64U NewRsp = *pTmpRsp;
9826 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9827
9828 /* Write the word the lazy way. */
9829 uint16_t *pu16Dst;
9830 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9831 if (rc == VINF_SUCCESS)
9832 {
9833 *pu16Dst = u16Value;
9834 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9835 }
9836
 9837 /* Commit the new RSP value unless an access handler made trouble. */
9838 if (rc == VINF_SUCCESS)
9839 *pTmpRsp = NewRsp;
9840
9841 return rc;
9842}
9843
9844
9845/**
9846 * Pushes a dword onto the stack, using a temporary stack pointer.
9847 *
9848 * @returns Strict VBox status code.
9849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9850 * @param u32Value The value to push.
9851 * @param pTmpRsp Pointer to the temporary stack pointer.
9852 */
9853IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9854{
9855 /* Increment the stack pointer. */
9856 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9857 RTUINT64U NewRsp = *pTmpRsp;
9858 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9859
 9860 /* Write the dword the lazy way. */
9861 uint32_t *pu32Dst;
9862 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9863 if (rc == VINF_SUCCESS)
9864 {
9865 *pu32Dst = u32Value;
9866 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9867 }
9868
 9869 /* Commit the new RSP value unless an access handler made trouble. */
9870 if (rc == VINF_SUCCESS)
9871 *pTmpRsp = NewRsp;
9872
9873 return rc;
9874}
9875
9876
9877/**
 9878 * Pushes a qword onto the stack, using a temporary stack pointer.
9879 *
9880 * @returns Strict VBox status code.
9881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9882 * @param u64Value The value to push.
9883 * @param pTmpRsp Pointer to the temporary stack pointer.
9884 */
9885IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9886{
9887 /* Increment the stack pointer. */
9888 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9889 RTUINT64U NewRsp = *pTmpRsp;
9890 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9891
 9892 /* Write the qword the lazy way. */
9893 uint64_t *pu64Dst;
9894 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9895 if (rc == VINF_SUCCESS)
9896 {
9897 *pu64Dst = u64Value;
9898 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9899 }
9900
 9901 /* Commit the new RSP value unless an access handler made trouble. */
9902 if (rc == VINF_SUCCESS)
9903 *pTmpRsp = NewRsp;
9904
9905 return rc;
9906}
9907
9908
9909/**
9910 * Pops a word from the stack, using a temporary stack pointer.
9911 *
9912 * @returns Strict VBox status code.
9913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9914 * @param pu16Value Where to store the popped value.
9915 * @param pTmpRsp Pointer to the temporary stack pointer.
9916 */
9917IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9918{
9919 /* Increment the stack pointer. */
9920 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9921 RTUINT64U NewRsp = *pTmpRsp;
9922 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9923
 9924 /* Read the word the lazy way. */
9925 uint16_t const *pu16Src;
9926 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9927 if (rc == VINF_SUCCESS)
9928 {
9929 *pu16Value = *pu16Src;
9930 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9931
9932 /* Commit the new RSP value. */
9933 if (rc == VINF_SUCCESS)
9934 *pTmpRsp = NewRsp;
9935 }
9936
9937 return rc;
9938}
9939
9940
9941/**
9942 * Pops a dword from the stack, using a temporary stack pointer.
9943 *
9944 * @returns Strict VBox status code.
9945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9946 * @param pu32Value Where to store the popped value.
9947 * @param pTmpRsp Pointer to the temporary stack pointer.
9948 */
9949IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9950{
9951 /* Increment the stack pointer. */
9952 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9953 RTUINT64U NewRsp = *pTmpRsp;
9954 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9955
 9956 /* Read the dword the lazy way. */
9957 uint32_t const *pu32Src;
9958 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9959 if (rc == VINF_SUCCESS)
9960 {
9961 *pu32Value = *pu32Src;
9962 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9963
9964 /* Commit the new RSP value. */
9965 if (rc == VINF_SUCCESS)
9966 *pTmpRsp = NewRsp;
9967 }
9968
9969 return rc;
9970}
9971
9972
9973/**
9974 * Pops a qword from the stack, using a temporary stack pointer.
9975 *
9976 * @returns Strict VBox status code.
9977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9978 * @param pu64Value Where to store the popped value.
9979 * @param pTmpRsp Pointer to the temporary stack pointer.
9980 */
9981IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9982{
9983 /* Increment the stack pointer. */
9984 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9985 RTUINT64U NewRsp = *pTmpRsp;
9986 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9987
 9988 /* Read the qword the lazy way. */
9989 uint64_t const *pu64Src;
9990 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9991 if (rcStrict == VINF_SUCCESS)
9992 {
9993 *pu64Value = *pu64Src;
9994 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9995
9996 /* Commit the new RSP value. */
9997 if (rcStrict == VINF_SUCCESS)
9998 *pTmpRsp = NewRsp;
9999 }
10000
10001 return rcStrict;
10002}
10003
10004
10005/**
 10006 * Begin a special stack push (used by interrupts, exceptions and such).
10007 *
10008 * This will raise \#SS or \#PF if appropriate.
10009 *
10010 * @returns Strict VBox status code.
10011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10012 * @param cbMem The number of bytes to push onto the stack.
10013 * @param ppvMem Where to return the pointer to the stack memory.
10014 * As with the other memory functions this could be
10015 * direct access or bounce buffered access, so
 10016 * don't commit the register state until the commit call
10017 * succeeds.
10018 * @param puNewRsp Where to return the new RSP value. This must be
10019 * passed unchanged to
10020 * iemMemStackPushCommitSpecial().
10021 */
10022IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10023{
10024 Assert(cbMem < UINT8_MAX);
10025 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10026 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10027 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10028}
10029
10030
10031/**
10032 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10033 *
10034 * This will update the rSP.
10035 *
10036 * @returns Strict VBox status code.
10037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10038 * @param pvMem The pointer returned by
10039 * iemMemStackPushBeginSpecial().
10040 * @param uNewRsp The new RSP value returned by
10041 * iemMemStackPushBeginSpecial().
10042 */
10043IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10044{
10045 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10046 if (rcStrict == VINF_SUCCESS)
10047 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10048 return rcStrict;
10049}
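/* Illustrative sketch (not from the original source): the begin/commit protocol
   for exception-style pushes. The 8-byte frame size and the zero written into it
   are made up; pVCpu is assumed to be in scope. */
#if 0
    uint64_t     uNewRsp;
    void        *pvStackFrame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, &pvStackFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *(uint64_t *)pvStackFrame = 0;  /* e.g. an error code; may be bounce buffered. */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp); /* commits RSP on success. */
    }
#endif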
10050
10051
10052/**
10053 * Begin a special stack pop (used by iret, retf and such).
10054 *
10055 * This will raise \#SS or \#PF if appropriate.
10056 *
10057 * @returns Strict VBox status code.
10058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10059 * @param cbMem The number of bytes to pop from the stack.
10060 * @param ppvMem Where to return the pointer to the stack memory.
10061 * @param puNewRsp Where to return the new RSP value. This must be
10062 * assigned to CPUMCTX::rsp manually some time
10063 * after iemMemStackPopDoneSpecial() has been
10064 * called.
10065 */
10066IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10067{
10068 Assert(cbMem < UINT8_MAX);
10069 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10070 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10071 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10072}
10073
10074
10075/**
10076 * Continue a special stack pop (used by iret and retf).
10077 *
10078 * This will raise \#SS or \#PF if appropriate.
10079 *
10080 * @returns Strict VBox status code.
10081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10082 * @param cbMem The number of bytes to pop from the stack.
10083 * @param ppvMem Where to return the pointer to the stack memory.
10084 * @param puNewRsp Where to return the new RSP value. This must be
10085 * assigned to CPUMCTX::rsp manually some time
10086 * after iemMemStackPopDoneSpecial() has been
10087 * called.
10088 */
10089IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10090{
10091 Assert(cbMem < UINT8_MAX);
10092 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10093 RTUINT64U NewRsp;
10094 NewRsp.u = *puNewRsp;
10095 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10096 *puNewRsp = NewRsp.u;
10097 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10098}
10099
10100
10101/**
10102 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10103 * iemMemStackPopContinueSpecial).
10104 *
10105 * The caller will manually commit the rSP.
10106 *
10107 * @returns Strict VBox status code.
10108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10109 * @param pvMem The pointer returned by
10110 * iemMemStackPopBeginSpecial() or
10111 * iemMemStackPopContinueSpecial().
10112 */
10113IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10114{
10115 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10116}
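/* Illustrative sketch (not from the original source): the begin/done protocol
   used by iret/retf style pops; note that RSP is only assigned by the caller
   after the done call succeeds. The 8-byte pop is made up; pVCpu is assumed to
   be in scope. */
#if 0
    uint64_t     uNewRsp;
    void const  *pvStackFrame;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvStackFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint64_t const uPopped = *(uint64_t const *)pvStackFrame;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvStackFrame);
        if (rcStrict == VINF_SUCCESS)
            IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  /* manual RSP commit, as documented above. */
        NOREF(uPopped);
    }
#endif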
10117
10118
10119/**
10120 * Fetches a system table byte.
10121 *
10122 * @returns Strict VBox status code.
10123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10124 * @param pbDst Where to return the byte.
10125 * @param iSegReg The index of the segment register to use for
10126 * this access. The base and limits are checked.
10127 * @param GCPtrMem The address of the guest memory.
10128 */
10129IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10130{
10131 /* The lazy approach for now... */
10132 uint8_t const *pbSrc;
10133 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10134 if (rc == VINF_SUCCESS)
10135 {
10136 *pbDst = *pbSrc;
10137 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10138 }
10139 return rc;
10140}
10141
10142
10143/**
10144 * Fetches a system table word.
10145 *
10146 * @returns Strict VBox status code.
10147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10148 * @param pu16Dst Where to return the word.
10149 * @param iSegReg The index of the segment register to use for
10150 * this access. The base and limits are checked.
10151 * @param GCPtrMem The address of the guest memory.
10152 */
10153IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10154{
10155 /* The lazy approach for now... */
10156 uint16_t const *pu16Src;
10157 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10158 if (rc == VINF_SUCCESS)
10159 {
10160 *pu16Dst = *pu16Src;
10161 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10162 }
10163 return rc;
10164}
10165
10166
10167/**
10168 * Fetches a system table dword.
10169 *
10170 * @returns Strict VBox status code.
10171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10172 * @param pu32Dst Where to return the dword.
10173 * @param iSegReg The index of the segment register to use for
10174 * this access. The base and limits are checked.
10175 * @param GCPtrMem The address of the guest memory.
10176 */
10177IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10178{
10179 /* The lazy approach for now... */
10180 uint32_t const *pu32Src;
10181 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10182 if (rc == VINF_SUCCESS)
10183 {
10184 *pu32Dst = *pu32Src;
10185 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10186 }
10187 return rc;
10188}
10189
10190
10191/**
10192 * Fetches a system table qword.
10193 *
10194 * @returns Strict VBox status code.
10195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10196 * @param pu64Dst Where to return the qword.
10197 * @param iSegReg The index of the segment register to use for
10198 * this access. The base and limits are checked.
10199 * @param GCPtrMem The address of the guest memory.
10200 */
10201IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10202{
10203 /* The lazy approach for now... */
10204 uint64_t const *pu64Src;
10205 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10206 if (rc == VINF_SUCCESS)
10207 {
10208 *pu64Dst = *pu64Src;
10209 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10210 }
10211 return rc;
10212}
10213
10214
10215/**
10216 * Fetches a descriptor table entry with caller specified error code.
10217 *
10218 * @returns Strict VBox status code.
10219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10220 * @param pDesc Where to return the descriptor table entry.
 10221 * @param uSel The selector whose table entry to fetch.
10222 * @param uXcpt The exception to raise on table lookup error.
10223 * @param uErrorCode The error code associated with the exception.
10224 */
10225IEM_STATIC VBOXSTRICTRC
10226iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10227{
10228 AssertPtr(pDesc);
10229 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10230
10231 /** @todo did the 286 require all 8 bytes to be accessible? */
10232 /*
10233 * Get the selector table base and check bounds.
10234 */
10235 RTGCPTR GCPtrBase;
10236 if (uSel & X86_SEL_LDT)
10237 {
10238 if ( !pCtx->ldtr.Attr.n.u1Present
10239 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10240 {
10241 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10242 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10243 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10244 uErrorCode, 0);
10245 }
10246
10247 Assert(pCtx->ldtr.Attr.n.u1Present);
10248 GCPtrBase = pCtx->ldtr.u64Base;
10249 }
10250 else
10251 {
10252 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10253 {
10254 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10255 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10256 uErrorCode, 0);
10257 }
10258 GCPtrBase = pCtx->gdtr.pGdt;
10259 }
10260
10261 /*
10262 * Read the legacy descriptor and maybe the long mode extensions if
10263 * required.
10264 */
10265 VBOXSTRICTRC rcStrict;
10266 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10267 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10268 else
10269 {
10270 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10271 if (rcStrict != VINF_SUCCESS)
10272 return rcStrict;
10273 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10274 if (rcStrict != VINF_SUCCESS)
10275 return rcStrict;
10276 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10277 pDesc->Legacy.au16[3] = 0;
10278 }
10279
10280 if (rcStrict == VINF_SUCCESS)
10281 {
10282 if ( !IEM_IS_LONG_MODE(pVCpu)
10283 || pDesc->Legacy.Gen.u1DescType)
10284 pDesc->Long.au64[1] = 0;
10285 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10286 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10287 else
10288 {
10289 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10290 /** @todo is this the right exception? */
10291 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10292 }
10293 }
10294 return rcStrict;
10295}
10296
10297
10298/**
10299 * Fetches a descriptor table entry.
10300 *
10301 * @returns Strict VBox status code.
10302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10303 * @param pDesc Where to return the descriptor table entry.
 10304 * @param uSel The selector whose table entry to fetch.
10305 * @param uXcpt The exception to raise on table lookup error.
10306 */
10307IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10308{
10309 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10310}
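/* Illustrative sketch (not from the original source): fetching the descriptor
   for a hypothetical selector 0x002b (GDT, index 5, RPL 3). The wrapper above
   supplies uSel & X86_SEL_MASK_OFF_RPL (0x0028 here) as the error code should
   the lookup raise the given exception. pVCpu is assumed to be in scope. */
#if 0
    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, 0x002b, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
#endif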
10311
10312
10313/**
10314 * Fakes a long mode stack selector for SS = 0.
10315 *
10316 * @param pDescSs Where to return the fake stack descriptor.
10317 * @param uDpl The DPL we want.
10318 */
10319IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10320{
10321 pDescSs->Long.au64[0] = 0;
10322 pDescSs->Long.au64[1] = 0;
10323 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10324 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10325 pDescSs->Long.Gen.u2Dpl = uDpl;
10326 pDescSs->Long.Gen.u1Present = 1;
10327 pDescSs->Long.Gen.u1Long = 1;
10328}
10329
10330
10331/**
10332 * Marks the selector descriptor as accessed (only non-system descriptors).
10333 *
 10334 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10335 * will therefore skip the limit checks.
10336 *
10337 * @returns Strict VBox status code.
10338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10339 * @param uSel The selector.
10340 */
10341IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10342{
10343 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10344
10345 /*
10346 * Get the selector table base and calculate the entry address.
10347 */
10348 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10349 ? pCtx->ldtr.u64Base
10350 : pCtx->gdtr.pGdt;
10351 GCPtr += uSel & X86_SEL_MASK;
10352
10353 /*
10354 * ASMAtomicBitSet will assert if the address is misaligned, so do some
 10355 * ugly stuff to avoid this. This will make sure the access is atomic
 10356 * and more or less removes any question about 8-bit vs 32-bit accesses.
10357 */
10358 VBOXSTRICTRC rcStrict;
10359 uint32_t volatile *pu32;
10360 if ((GCPtr & 3) == 0)
10361 {
 10362 /* The normal case: map the 32 bits containing the accessed bit (descriptor bit 40). */
10363 GCPtr += 2 + 2;
10364 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10365 if (rcStrict != VINF_SUCCESS)
10366 return rcStrict;
 10367 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10368 }
10369 else
10370 {
10371 /* The misaligned GDT/LDT case, map the whole thing. */
10372 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10373 if (rcStrict != VINF_SUCCESS)
10374 return rcStrict;
10375 switch ((uintptr_t)pu32 & 3)
10376 {
10377 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10378 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10379 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10380 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10381 }
10382 }
10383
10384 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10385}
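/* Illustrative arithmetic (not from the original source): for an aligned entry the
   accessed bit is bit 40 of the 8-byte descriptor, i.e. bit 8 of the dword at
   offset 4, which is why the aligned path above maps GCPtr + 4 and sets bit 8;
   the misaligned path recomputes the same bit relative to the pointer it got back
   (40 - 0/8/16/24 depending on the low two address bits). */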
10386
10387/** @} */
10388
10389
10390/*
 10391 * Include the C/C++ implementation of instructions.
10392 */
10393#include "IEMAllCImpl.cpp.h"
10394
10395
10396
10397/** @name "Microcode" macros.
10398 *
 10399 * The idea is that we should be able to use the same code to interpret
 10400 * instructions as well as to recompile them. Thus this obfuscation.
10401 *
10402 * @{
10403 */
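/* Illustrative sketch (not from the original source): a hypothetical instruction
   body written purely in terms of the IEM_MC_* macros defined below. Interpreted
   here it expands to plain C, but the same text could be re-targeted by a
   recompiler front-end. The register choice is made up. */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
    IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
#endif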
10404#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10405#define IEM_MC_END() }
10406#define IEM_MC_PAUSE() do {} while (0)
10407#define IEM_MC_CONTINUE() do {} while (0)
10408
10409/** Internal macro. */
10410#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10411 do \
10412 { \
10413 VBOXSTRICTRC rcStrict2 = a_Expr; \
10414 if (rcStrict2 != VINF_SUCCESS) \
10415 return rcStrict2; \
10416 } while (0)
10417
10418
10419#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10420#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10421#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10422#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10423#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10424#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10425#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10426#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10427#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10428 do { \
10429 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10430 return iemRaiseDeviceNotAvailable(pVCpu); \
10431 } while (0)
10432#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10433 do { \
10434 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10435 return iemRaiseMathFault(pVCpu); \
10436 } while (0)
10437#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10438 do { \
10439 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10440 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10441 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10442 return iemRaiseUndefinedOpcode(pVCpu); \
10443 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10444 return iemRaiseDeviceNotAvailable(pVCpu); \
10445 } while (0)
10446#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10447 do { \
10448 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10449 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10450 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10451 return iemRaiseUndefinedOpcode(pVCpu); \
10452 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10453 return iemRaiseDeviceNotAvailable(pVCpu); \
10454 } while (0)
10455#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10456 do { \
10457 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10458 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10459 return iemRaiseUndefinedOpcode(pVCpu); \
10460 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10461 return iemRaiseDeviceNotAvailable(pVCpu); \
10462 } while (0)
10463#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10464 do { \
10465 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10466 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10467 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10468 return iemRaiseUndefinedOpcode(pVCpu); \
10469 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10470 return iemRaiseDeviceNotAvailable(pVCpu); \
10471 } while (0)
10472#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10473 do { \
10474 if (pVCpu->iem.s.uCpl != 0) \
10475 return iemRaiseGeneralProtectionFault0(pVCpu); \
10476 } while (0)
10477
10478
10479#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10480#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10481#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10482#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10483#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10484#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10485#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10486 uint32_t a_Name; \
10487 uint32_t *a_pName = &a_Name
10488#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10489 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10490
10491#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10492#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10493
10494#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10495#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10496#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10497#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10498#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10499#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10500#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10501#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10502#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10503#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10504#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10505#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10506#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10507#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10508#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10509#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10510#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10511#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10512#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10513#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10514#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10515#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10516#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10517#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10518#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10519#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10520#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10521#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10522#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10523/** @note Not for IOPL or IF testing or modification. */
10524#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10525#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10526#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10527#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10528
10529#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10530#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10531#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10532#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10533#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10534#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10535#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10536#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10537#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10538#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10539#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10540 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10541
10542#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10543#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10544/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10545 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10546#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10547#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10548/** @note Not for IOPL or IF testing or modification. */
10549#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10550
10551#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10552#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10553#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10554 do { \
10555 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10556 *pu32Reg += (a_u32Value); \
 10557 pu32Reg[1] = 0; /* implicitly clear the high dword (bits 63:32). */ \
10558 } while (0)
10559#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10560
10561#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10562#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10563#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10564 do { \
10565 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10566 *pu32Reg -= (a_u32Value); \
 10567 pu32Reg[1] = 0; /* implicitly clear the high dword (bits 63:32). */ \
10568 } while (0)
10569#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10570#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10571
10572#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10573#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10574#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10575#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10576#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10577#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10578#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10579
10580#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10581#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10582#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10583#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10584
10585#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10586#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10587#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10588
10589#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10590#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10591#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10592
10593#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10594#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10595#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10596
10597#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10598#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10599#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10600
10601#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10602
10603#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10604
10605#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10606#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10607#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10608 do { \
10609 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10610 *pu32Reg &= (a_u32Value); \
 10611 pu32Reg[1] = 0; /* implicitly clear the high dword (bits 63:32). */ \
10612 } while (0)
10613#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10614
10615#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10616#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10617#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10618 do { \
10619 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10620 *pu32Reg |= (a_u32Value); \
 10621 pu32Reg[1] = 0; /* implicitly clear the high dword (bits 63:32). */ \
10622 } while (0)
10623#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10624
10625
10626/** @note Not for IOPL or IF modification. */
10627#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10628/** @note Not for IOPL or IF modification. */
10629#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10630/** @note Not for IOPL or IF modification. */
10631#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10632
10633#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10634
10635
10636#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10637 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10638#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10639 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10640#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10641 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10642#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10643 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10644#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10645 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10646#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10647 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10648#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10649 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10650
10651#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10652 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10653#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10654 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10655#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10656 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10657#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10658 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10659#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10660 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10661#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10662 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10663 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10664 } while (0)
10665#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10666 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10667 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10668 } while (0)
10669#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10670 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10671#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10672 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10673#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10674 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10675#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10676 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10677 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10678
10679#ifndef IEM_WITH_SETJMP
10680# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10682# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10684# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10685 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10686#else
10687# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10688 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10689# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10690 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10691# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10692 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10693#endif
10694
10695#ifndef IEM_WITH_SETJMP
10696# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10698# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10700# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10702#else
10703# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10704 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10705# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10706 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10707# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10708 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10709#endif
10710
10711#ifndef IEM_WITH_SETJMP
10712# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10714# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10716# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10718#else
10719# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10720 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10721# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10722 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10723# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10724 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10725#endif
10726
10727#ifdef SOME_UNUSED_FUNCTION
10728# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10730#endif
10731
10732#ifndef IEM_WITH_SETJMP
10733# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10735# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10737# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10739# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10741#else
10742# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10743 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10744# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10745 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10746# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10747 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10748# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10749 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10750#endif
10751
10752#ifndef IEM_WITH_SETJMP
10753# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10755# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10757# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10758 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10759#else
10760# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10761 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10762# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10763 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10764# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10765 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10766#endif
10767
10768#ifndef IEM_WITH_SETJMP
10769# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10771# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10773#else
10774# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10775 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10776# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10777 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10778#endif
10779
10780
10781
10782#ifndef IEM_WITH_SETJMP
10783# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10784 do { \
10785 uint8_t u8Tmp; \
10786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10787 (a_u16Dst) = u8Tmp; \
10788 } while (0)
10789# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10790 do { \
10791 uint8_t u8Tmp; \
10792 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10793 (a_u32Dst) = u8Tmp; \
10794 } while (0)
10795# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10796 do { \
10797 uint8_t u8Tmp; \
10798 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10799 (a_u64Dst) = u8Tmp; \
10800 } while (0)
10801# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10802 do { \
10803 uint16_t u16Tmp; \
10804 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10805 (a_u32Dst) = u16Tmp; \
10806 } while (0)
10807# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10808 do { \
10809 uint16_t u16Tmp; \
10810 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10811 (a_u64Dst) = u16Tmp; \
10812 } while (0)
10813# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10814 do { \
10815 uint32_t u32Tmp; \
10816 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10817 (a_u64Dst) = u32Tmp; \
10818 } while (0)
10819#else /* IEM_WITH_SETJMP */
10820# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10821 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10822# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10823 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10824# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10825 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10826# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10827 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10828# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10829 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10830# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10831 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10832#endif /* IEM_WITH_SETJMP */
10833
10834#ifndef IEM_WITH_SETJMP
10835# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10836 do { \
10837 uint8_t u8Tmp; \
10838 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10839 (a_u16Dst) = (int8_t)u8Tmp; \
10840 } while (0)
10841# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10842 do { \
10843 uint8_t u8Tmp; \
10844 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10845 (a_u32Dst) = (int8_t)u8Tmp; \
10846 } while (0)
10847# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10848 do { \
10849 uint8_t u8Tmp; \
10850 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10851 (a_u64Dst) = (int8_t)u8Tmp; \
10852 } while (0)
10853# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10854 do { \
10855 uint16_t u16Tmp; \
10856 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10857 (a_u32Dst) = (int16_t)u16Tmp; \
10858 } while (0)
10859# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10860 do { \
10861 uint16_t u16Tmp; \
10862 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10863 (a_u64Dst) = (int16_t)u16Tmp; \
10864 } while (0)
10865# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10866 do { \
10867 uint32_t u32Tmp; \
10868 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10869 (a_u64Dst) = (int32_t)u32Tmp; \
10870 } while (0)
10871#else /* IEM_WITH_SETJMP */
10872# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10873 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10874# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10875 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10876# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10877 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10878# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10879 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10880# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10881 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10882# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10883 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10884#endif /* IEM_WITH_SETJMP */
10885
10886#ifndef IEM_WITH_SETJMP
10887# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10888 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10889# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10890 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10891# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10892 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10893# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10894 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10895#else
10896# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10897 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10898# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10899 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10900# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10901 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10902# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10903 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10904#endif
10905
10906#ifndef IEM_WITH_SETJMP
10907# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10908 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10909# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10910 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10911# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10912 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10913# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10914 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10915#else
10916# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10917 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10918# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10919 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10920# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10921 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10922# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10923 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10924#endif
10925
10926#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10927#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10928#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10929#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10930#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10931#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10932#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10933 do { \
10934 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10935 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10936 } while (0)
10937
10938#ifndef IEM_WITH_SETJMP
10939# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10940 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10941# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10942 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10943#else
10944# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10945 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10946# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10947 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10948#endif
10949
10950
10951#define IEM_MC_PUSH_U16(a_u16Value) \
10952 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10953#define IEM_MC_PUSH_U32(a_u32Value) \
10954 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10955#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10956 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10957#define IEM_MC_PUSH_U64(a_u64Value) \
10958 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10959
10960#define IEM_MC_POP_U16(a_pu16Value) \
10961 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10962#define IEM_MC_POP_U32(a_pu32Value) \
10963 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10964#define IEM_MC_POP_U64(a_pu64Value) \
10965 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10966
10967/** Maps guest memory for direct or bounce buffered access.
10968 * The purpose is to pass the mapping to an operand implementation, hence the a_iArg.
10969 * @remarks May return.
10970 */
10971#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10972 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10973
10974/** Maps guest memory for direct or bounce buffered access.
10975 * The purpose is to pass the mapping to an operand implementation, hence the a_iArg.
10976 * @remarks May return.
10977 */
10978#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10979 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10980
10981/** Commits the memory and unmaps the guest memory.
10982 * @remarks May return.
10983 */
10984#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10985 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
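/*
 * Illustrative sketch, not part of the original source: the intended
 * map / modify / commit cycle inside an IEM_MC instruction body.  It assumes
 * the IEM_MC_BEGIN/ARG/LOCAL/ADVANCE_RIP macros defined earlier in this file,
 * the IEM_ACCESS_DATA_RW access flag from IEMInternal.h, and a hypothetical
 * 16-bit worker named iemAImpl_example_u16.
 *
 *   IEM_MC_BEGIN(1, 1);
 *   IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *   IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *   IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *   IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *   IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_example_u16, pu16Dst);
 *   IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 */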
10986
10987/** Commits the memory and unmaps the guest memory, unless the FPU status word
10988 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
10989 * that would prevent the instruction from storing its result.
10990 *
10991 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10992 * store, while \#P will not.
10993 *
10994 * @remarks May in theory return - for now.
10995 */
10996#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10997 do { \
10998 if ( !(a_u16FSW & X86_FSW_ES) \
10999 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11000 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11001 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11002 } while (0)
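/*
 * Worked example, not part of the original source (assumes the standard x86
 * FSW/FCW bit layout from x86.h): with a_u16FSW = X86_FSW_ES | X86_FSW_IE and
 * FCW.IM clear, the sub-expression
 *   (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) & ~(FCW & X86_FCW_MASK_ALL)
 * is non-zero, so the condition above is false and the commit is skipped.
 * With FCW.IM set the exception is masked, the sub-expression becomes zero
 * and the store is committed as usual.  A pending precision exception only
 * sets X86_FSW_PE, which is not among the bits tested, which is why, as noted
 * above, \#P does not block the store.
 */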
11003
11004/** Calculates the effective address from the ModR/M byte. */
11005#ifndef IEM_WITH_SETJMP
11006# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11007 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11008#else
11009# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11010 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11011#endif
11012
11013#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11014#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11015#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11016#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11017#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11018#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11019#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11020
11021/**
11022 * Defers the rest of the instruction emulation to a C implementation routine
11023 * and returns, only taking the standard parameters.
11024 *
11025 * @param a_pfnCImpl The pointer to the C routine.
11026 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11027 */
11028#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11029
11030/**
11031 * Defers the rest of the instruction emulation to a C implementation routine and
11032 * returns, taking one argument in addition to the standard ones.
11033 *
11034 * @param a_pfnCImpl The pointer to the C routine.
11035 * @param a0 The argument.
11036 */
11037#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11038
11039/**
11040 * Defers the rest of the instruction emulation to a C implementation routine
11041 * and returns, taking two arguments in addition to the standard ones.
11042 *
11043 * @param a_pfnCImpl The pointer to the C routine.
11044 * @param a0 The first extra argument.
11045 * @param a1 The second extra argument.
11046 */
11047#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11048
11049/**
11050 * Defers the rest of the instruction emulation to a C implementation routine
11051 * and returns, taking three arguments in addition to the standard ones.
11052 *
11053 * @param a_pfnCImpl The pointer to the C routine.
11054 * @param a0 The first extra argument.
11055 * @param a1 The second extra argument.
11056 * @param a2 The third extra argument.
11057 */
11058#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11059
11060/**
11061 * Defers the rest of the instruction emulation to a C implementation routine
11062 * and returns, taking four arguments in addition to the standard ones.
11063 *
11064 * @param a_pfnCImpl The pointer to the C routine.
11065 * @param a0 The first extra argument.
11066 * @param a1 The second extra argument.
11067 * @param a2 The third extra argument.
11068 * @param a3 The fourth extra argument.
11069 */
11070#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11071
11072/**
11073 * Defers the rest of the instruction emulation to a C implementation routine
11074 * and returns, taking five arguments in addition to the standard ones.
11075 *
11076 * @param a_pfnCImpl The pointer to the C routine.
11077 * @param a0 The first extra argument.
11078 * @param a1 The second extra argument.
11079 * @param a2 The third extra argument.
11080 * @param a3 The fourth extra argument.
11081 * @param a4 The fifth extra argument.
11082 */
11083#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11084
11085/**
11086 * Defers the entire instruction emulation to a C implementation routine and
11087 * returns, only taking the standard parameters.
11088 *
11089 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11090 *
11091 * @param a_pfnCImpl The pointer to the C routine.
11092 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11093 */
11094#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11095
11096/**
11097 * Defers the entire instruction emulation to a C implementation routine and
11098 * returns, taking one argument in addition to the standard ones.
11099 *
11100 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11101 *
11102 * @param a_pfnCImpl The pointer to the C routine.
11103 * @param a0 The argument.
11104 */
11105#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11106
11107/**
11108 * Defers the entire instruction emulation to a C implementation routine and
11109 * returns, taking two arguments in addition to the standard ones.
11110 *
11111 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11112 *
11113 * @param a_pfnCImpl The pointer to the C routine.
11114 * @param a0 The first extra argument.
11115 * @param a1 The second extra argument.
11116 */
11117#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11118
11119/**
11120 * Defers the entire instruction emulation to a C implementation routine and
11121 * returns, taking three arguments in addition to the standard ones.
11122 *
11123 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11124 *
11125 * @param a_pfnCImpl The pointer to the C routine.
11126 * @param a0 The first extra argument.
11127 * @param a1 The second extra argument.
11128 * @param a2 The third extra argument.
11129 */
11130#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
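/*
 * Illustrative sketch, not part of the original source: a decoder body that
 * hands the entire instruction over to a C worker.  The FNIEMOP_DEF helper
 * and the opcode/worker names below merely follow the IEM naming conventions
 * and are assumptions used only for this example.
 *
 *   FNIEMOP_DEF(iemOp_example)
 *   {
 *       IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *       return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
 *   }
 */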
11131
11132/**
11133 * Calls a FPU assembly implementation taking one visible argument.
11134 *
11135 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11136 * @param a0 The first extra argument.
11137 */
11138#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11139 do { \
11140 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11141 } while (0)
11142
11143/**
11144 * Calls a FPU assembly implementation taking two visible arguments.
11145 *
11146 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11147 * @param a0 The first extra argument.
11148 * @param a1 The second extra argument.
11149 */
11150#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11151 do { \
11152 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11153 } while (0)
11154
11155/**
11156 * Calls a FPU assembly implementation taking three visible arguments.
11157 *
11158 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11159 * @param a0 The first extra argument.
11160 * @param a1 The second extra argument.
11161 * @param a2 The third extra argument.
11162 */
11163#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11164 do { \
11165 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11166 } while (0)
11167
11168#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11169 do { \
11170 (a_FpuData).FSW = (a_FSW); \
11171 (a_FpuData).r80Result = *(a_pr80Value); \
11172 } while (0)
11173
11174/** Pushes FPU result onto the stack. */
11175#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11176 iemFpuPushResult(pVCpu, &a_FpuData)
11177/** Pushes FPU result onto the stack and sets the FPUDP. */
11178#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11179 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11180
11181/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
11182#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11183 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11184
11185/** Stores FPU result in a stack register. */
11186#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11187 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11188/** Stores FPU result in a stack register and pops the stack. */
11189#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11190 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11191/** Stores FPU result in a stack register and sets the FPUDP. */
11192#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11193 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11194/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11195 * stack. */
11196#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11197 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
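/*
 * Illustrative sketch, not part of the original source, of how the FPU call
 * and result macros combine in a typical two-operand instruction body.  The
 * worker name iemAImpl_example_r80_by_r80 is an assumption; the IEM_MC_LOCAL,
 * IEM_MC_ARG* and IEM_MC_IF_* macros used below are defined elsewhere in this
 * file.
 *
 *   IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *   IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *   IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *   IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *
 *   IEM_MC_PREPARE_FPU_USAGE();
 *   IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *       IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_example_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *       IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, 1);
 *   IEM_MC_ELSE()
 *       IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(1);
 *   IEM_MC_ENDIF();
 */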
11198
11199/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11200#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11201 iemFpuUpdateOpcodeAndIp(pVCpu)
11202/** Free a stack register (for FFREE and FFREEP). */
11203#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11204 iemFpuStackFree(pVCpu, a_iStReg)
11205/** Increment the FPU stack pointer. */
11206#define IEM_MC_FPU_STACK_INC_TOP() \
11207 iemFpuStackIncTop(pVCpu)
11208/** Decrement the FPU stack pointer. */
11209#define IEM_MC_FPU_STACK_DEC_TOP() \
11210 iemFpuStackDecTop(pVCpu)
11211
11212/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11213#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11214 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11215/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11216#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11217 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11218/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11219#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11220 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11221/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11222#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11223 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11224/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11225 * stack. */
11226#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11227 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11228/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11229#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11230 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11231
11232/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11233#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11234 iemFpuStackUnderflow(pVCpu, a_iStDst)
11235/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11236 * stack. */
11237#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11238 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11239/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11240 * FPUDS. */
11241#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11242 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11243/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11244 * FPUDS. Pops stack. */
11245#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11246 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11247/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11248 * stack twice. */
11249#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11250 iemFpuStackUnderflowThenPopPop(pVCpu)
11251/** Raises a FPU stack underflow exception for an instruction pushing a result
11252 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11253#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11254 iemFpuStackPushUnderflow(pVCpu)
11255/** Raises a FPU stack underflow exception for an instruction pushing a result
11256 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11257#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11258 iemFpuStackPushUnderflowTwo(pVCpu)
11259
11260/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11261 * FPUIP, FPUCS and FOP. */
11262#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11263 iemFpuStackPushOverflow(pVCpu)
11264/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11265 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11266#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11267 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11268/** Prepares for using the FPU state.
11269 * Ensures that we can use the host FPU in the current context (RC+R0).
11270 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11271#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11272/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11273#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11274/** Actualizes the guest FPU state so it can be accessed and modified. */
11275#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11276
11277/** Prepares for using the SSE state.
11278 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11279 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11280#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11281/** Actualizes the guest XMM0..15 register state for read-only access. */
11282#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11283/** Actualizes the guest XMM0..15 register state for read-write access. */
11284#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
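/*
 * Usage note, not part of the original source: PREPARE is for bodies that are
 * about to execute host FPU/SSE code, while the ACTUALIZE variants suffice
 * when the guest state is only read or stored to memory.  A minimal sketch,
 * assuming a local u16Fsw and an accessor along the lines of IEM_MC_FETCH_FSW
 * (an assumption here):
 *
 *   IEM_MC_LOCAL(uint16_t, u16Fsw);
 *   IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
 *   IEM_MC_FETCH_FSW(u16Fsw);
 *   IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Fsw);
 */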
11285
11286/**
11287 * Calls a MMX assembly implementation taking two visible arguments.
11288 *
11289 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11290 * @param a0 The first extra argument.
11291 * @param a1 The second extra argument.
11292 */
11293#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11294 do { \
11295 IEM_MC_PREPARE_FPU_USAGE(); \
11296 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11297 } while (0)
11298
11299/**
11300 * Calls a MMX assembly implementation taking three visible arguments.
11301 *
11302 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11303 * @param a0 The first extra argument.
11304 * @param a1 The second extra argument.
11305 * @param a2 The third extra argument.
11306 */
11307#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11308 do { \
11309 IEM_MC_PREPARE_FPU_USAGE(); \
11310 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11311 } while (0)
11312
11313
11314/**
11315 * Calls a SSE assembly implementation taking two visible arguments.
11316 *
11317 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11318 * @param a0 The first extra argument.
11319 * @param a1 The second extra argument.
11320 */
11321#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11322 do { \
11323 IEM_MC_PREPARE_SSE_USAGE(); \
11324 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11325 } while (0)
11326
11327/**
11328 * Calls a SSE assembly implementation taking three visible arguments.
11329 *
11330 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11331 * @param a0 The first extra argument.
11332 * @param a1 The second extra argument.
11333 * @param a2 The third extra argument.
11334 */
11335#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11336 do { \
11337 IEM_MC_PREPARE_SSE_USAGE(); \
11338 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11339 } while (0)
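/*
 * Illustrative sketch, not part of the original source: an SSE instruction
 * body using the call macro above together with the XMM reference macros
 * defined earlier in this file.  The worker name iemAImpl_example_u128 and
 * the exception-check macro are assumptions following the IEM conventions.
 *
 *   IEM_MC_ARG(uint128_t *,       pDst, 0);
 *   IEM_MC_ARG(uint128_t const *, pSrc, 1);
 *   IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *   IEM_MC_PREPARE_SSE_USAGE();
 *   IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *   IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *   IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_example_u128, pDst, pSrc);
 */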
11340
11341/** @note Not for IOPL or IF testing. */
11342#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11343/** @note Not for IOPL or IF testing. */
11344#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11345/** @note Not for IOPL or IF testing. */
11346#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11347/** @note Not for IOPL or IF testing. */
11348#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11349/** @note Not for IOPL or IF testing. */
11350#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11351 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11352 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11353/** @note Not for IOPL or IF testing. */
11354#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11355 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11356 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11357/** @note Not for IOPL or IF testing. */
11358#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11359 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11360 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11361 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11362/** @note Not for IOPL or IF testing. */
11363#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11364 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11365 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11366 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11367#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11368#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11369#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11370/** @note Not for IOPL or IF testing. */
11371#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11372 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11373 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11374/** @note Not for IOPL or IF testing. */
11375#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11376 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11377 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11378/** @note Not for IOPL or IF testing. */
11379#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11380 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11381 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11382/** @note Not for IOPL or IF testing. */
11383#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11384 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11385 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11386/** @note Not for IOPL or IF testing. */
11387#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11388 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11389 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11390/** @note Not for IOPL or IF testing. */
11391#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11392 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11393 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11394#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11395#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11396
11397#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11398 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11399#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11400 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11401#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11402 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11403#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11404 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11405#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11406 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11407#define IEM_MC_IF_FCW_IM() \
11408 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11409
11410#define IEM_MC_ELSE() } else {
11411#define IEM_MC_ENDIF() } do {} while (0)
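/*
 * Illustrative sketch, not part of the original source: the IF/ELSE/ENDIF
 * macros expand to plain C blocks, so a SETcc-style body reads like this
 * (IEM_MC_STORE_GREG_U8 is a register store accessor defined earlier in this
 * file; its use here is illustrative):
 *
 *   IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *       IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1);
 *   IEM_MC_ELSE()
 *       IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0);
 *   IEM_MC_ENDIF();
 */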
11412
11413/** @} */
11414
11415
11416/** @name Opcode Debug Helpers.
11417 * @{
11418 */
11419#ifdef VBOX_WITH_STATISTICS
11420# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11421#else
11422# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11423#endif
11424
11425#ifdef DEBUG
11426# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11427 do { \
11428 IEMOP_INC_STATS(a_Stats); \
11429 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11430 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11431 } while (0)
11432#else
11433# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11434#endif
11435
11436/** @} */
11437
11438
11439/** @name Opcode Helpers.
11440 * @{
11441 */
11442
11443#ifdef IN_RING3
11444# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11445 do { \
11446 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11447 else \
11448 { \
11449 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11450 return IEMOP_RAISE_INVALID_OPCODE(); \
11451 } \
11452 } while (0)
11453#else
11454# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11455 do { \
11456 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11457 else return IEMOP_RAISE_INVALID_OPCODE(); \
11458 } while (0)
11459#endif
11460
11461/** The instruction requires a 186 or later. */
11462#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11463# define IEMOP_HLP_MIN_186() do { } while (0)
11464#else
11465# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11466#endif
11467
11468/** The instruction requires a 286 or later. */
11469#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11470# define IEMOP_HLP_MIN_286() do { } while (0)
11471#else
11472# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11473#endif
11474
11475/** The instruction requires a 386 or later. */
11476#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11477# define IEMOP_HLP_MIN_386() do { } while (0)
11478#else
11479# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11480#endif
11481
11482/** The instruction requires a 386 or later if the given expression is true. */
11483#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11484# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11485#else
11486# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11487#endif
11488
11489/** The instruction requires a 486 or later. */
11490#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11491# define IEMOP_HLP_MIN_486() do { } while (0)
11492#else
11493# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11494#endif
11495
11496/** The instruction requires a Pentium (586) or later. */
11497#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11498# define IEMOP_HLP_MIN_586() do { } while (0)
11499#else
11500# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11501#endif
11502
11503/** The instruction requires a PentiumPro (686) or later. */
11504#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11505# define IEMOP_HLP_MIN_686() do { } while (0)
11506#else
11507# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11508#endif
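/*
 * Illustrative sketch, not part of the original source: the minimum-CPU
 * helpers sit at the top of a decoder, right after the mnemonic logging, so
 * that older target CPUs see \#UD instead of the newer instruction.  The
 * names below are examples only.
 *
 *   FNIEMOP_DEF(iemOp_example_486)
 *   {
 *       IEMOP_MNEMONIC(example, "example");
 *       IEMOP_HLP_MIN_486();
 *       ...decode and emit the instruction body here...
 *   }
 */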
11509
11510
11511/** The instruction raises an \#UD in real and V8086 mode. */
11512#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11513 do \
11514 { \
11515 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11516 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11517 } while (0)
11518
11519/** The instruction is not available in 64-bit mode; throw \#UD if we're in
11520 * 64-bit mode. */
11521#define IEMOP_HLP_NO_64BIT() \
11522 do \
11523 { \
11524 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11525 return IEMOP_RAISE_INVALID_OPCODE(); \
11526 } while (0)
11527
11528/** The instruction is only available in 64-bit mode; throw \#UD if we're not in
11529 * 64-bit mode. */
11530#define IEMOP_HLP_ONLY_64BIT() \
11531 do \
11532 { \
11533 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11534 return IEMOP_RAISE_INVALID_OPCODE(); \
11535 } while (0)
11536
11537/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11538#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11539 do \
11540 { \
11541 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11542 iemRecalEffOpSize64Default(pVCpu); \
11543 } while (0)
11544
11545/** The instruction has 64-bit operand size if 64-bit mode. */
11546#define IEMOP_HLP_64BIT_OP_SIZE() \
11547 do \
11548 { \
11549 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11550 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11551 } while (0)
11552
11553/** Only a REX prefix immediately preceding the first opcode byte takes
11554 * effect. This macro helps ensure this as well as logging bad guest code. */
11555#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11556 do \
11557 { \
11558 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11559 { \
11560 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11561 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11562 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11563 pVCpu->iem.s.uRexB = 0; \
11564 pVCpu->iem.s.uRexIndex = 0; \
11565 pVCpu->iem.s.uRexReg = 0; \
11566 iemRecalEffOpSize(pVCpu); \
11567 } \
11568 } while (0)
11569
11570/**
11571 * Done decoding.
11572 */
11573#define IEMOP_HLP_DONE_DECODING() \
11574 do \
11575 { \
11576 /*nothing for now, maybe later... */ \
11577 } while (0)
11578
11579/**
11580 * Done decoding, raise \#UD exception if lock prefix present.
11581 */
11582#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11583 do \
11584 { \
11585 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11586 { /* likely */ } \
11587 else \
11588 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11589 } while (0)
11590#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11591 do \
11592 { \
11593 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11594 { /* likely */ } \
11595 else \
11596 { \
11597 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11598 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11599 } \
11600 } while (0)
11601#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11602 do \
11603 { \
11604 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11605 { /* likely */ } \
11606 else \
11607 { \
11608 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11609 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11610 } \
11611 } while (0)
11612
11613/**
11614 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11615 * are present.
11616 */
11617#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11618 do \
11619 { \
11620 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11621 { /* likely */ } \
11622 else \
11623 return IEMOP_RAISE_INVALID_OPCODE(); \
11624 } while (0)
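/*
 * Usage note, not part of the original source: a decoder invokes exactly one
 * of the IEMOP_HLP_DONE_DECODING* helpers once all opcode bytes have been
 * fetched, choosing the variant that matches which prefixes the instruction
 * tolerates; e.g. an instruction that accepts neither LOCK nor REP prefixes
 * would use:
 *
 *   IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES();
 */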
11625
11626
11627/**
11628 * Calculates the effective address of a ModR/M memory operand.
11629 *
11630 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11631 *
11632 * @return Strict VBox status code.
11633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11634 * @param bRm The ModRM byte.
11635 * @param cbImm The size of any immediate following the
11636 * effective address opcode bytes. Important for
11637 * RIP relative addressing.
11638 * @param pGCPtrEff Where to return the effective address.
11639 */
11640IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11641{
11642 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11643 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11644# define SET_SS_DEF() \
11645 do \
11646 { \
11647 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11648 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11649 } while (0)
11650
11651 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11652 {
11653/** @todo Check the effective address size crap! */
11654 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11655 {
11656 uint16_t u16EffAddr;
11657
11658 /* Handle the disp16 form with no registers first. */
11659 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11660 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11661 else
11662 {
11663                /* Get the displacement. */
11664 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11665 {
11666 case 0: u16EffAddr = 0; break;
11667 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11668 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11669 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11670 }
11671
11672 /* Add the base and index registers to the disp. */
11673 switch (bRm & X86_MODRM_RM_MASK)
11674 {
11675 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11676 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11677 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11678 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11679 case 4: u16EffAddr += pCtx->si; break;
11680 case 5: u16EffAddr += pCtx->di; break;
11681 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11682 case 7: u16EffAddr += pCtx->bx; break;
11683 }
11684 }
11685
11686 *pGCPtrEff = u16EffAddr;
11687 }
11688 else
11689 {
11690 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11691 uint32_t u32EffAddr;
11692
11693 /* Handle the disp32 form with no registers first. */
11694 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11695 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11696 else
11697 {
11698 /* Get the register (or SIB) value. */
11699 switch ((bRm & X86_MODRM_RM_MASK))
11700 {
11701 case 0: u32EffAddr = pCtx->eax; break;
11702 case 1: u32EffAddr = pCtx->ecx; break;
11703 case 2: u32EffAddr = pCtx->edx; break;
11704 case 3: u32EffAddr = pCtx->ebx; break;
11705 case 4: /* SIB */
11706 {
11707 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11708
11709 /* Get the index and scale it. */
11710 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11711 {
11712 case 0: u32EffAddr = pCtx->eax; break;
11713 case 1: u32EffAddr = pCtx->ecx; break;
11714 case 2: u32EffAddr = pCtx->edx; break;
11715 case 3: u32EffAddr = pCtx->ebx; break;
11716 case 4: u32EffAddr = 0; /*none */ break;
11717 case 5: u32EffAddr = pCtx->ebp; break;
11718 case 6: u32EffAddr = pCtx->esi; break;
11719 case 7: u32EffAddr = pCtx->edi; break;
11720 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11721 }
11722 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11723
11724 /* add base */
11725 switch (bSib & X86_SIB_BASE_MASK)
11726 {
11727 case 0: u32EffAddr += pCtx->eax; break;
11728 case 1: u32EffAddr += pCtx->ecx; break;
11729 case 2: u32EffAddr += pCtx->edx; break;
11730 case 3: u32EffAddr += pCtx->ebx; break;
11731 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11732 case 5:
11733 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11734 {
11735 u32EffAddr += pCtx->ebp;
11736 SET_SS_DEF();
11737 }
11738 else
11739 {
11740 uint32_t u32Disp;
11741 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11742 u32EffAddr += u32Disp;
11743 }
11744 break;
11745 case 6: u32EffAddr += pCtx->esi; break;
11746 case 7: u32EffAddr += pCtx->edi; break;
11747 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11748 }
11749 break;
11750 }
11751 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11752 case 6: u32EffAddr = pCtx->esi; break;
11753 case 7: u32EffAddr = pCtx->edi; break;
11754 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11755 }
11756
11757 /* Get and add the displacement. */
11758 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11759 {
11760 case 0:
11761 break;
11762 case 1:
11763 {
11764 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11765 u32EffAddr += i8Disp;
11766 break;
11767 }
11768 case 2:
11769 {
11770 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11771 u32EffAddr += u32Disp;
11772 break;
11773 }
11774 default:
11775 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11776 }
11777
11778 }
11779 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11780 *pGCPtrEff = u32EffAddr;
11781 else
11782 {
11783 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11784 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11785 }
11786 }
11787 }
11788 else
11789 {
11790 uint64_t u64EffAddr;
11791
11792 /* Handle the rip+disp32 form with no registers first. */
11793 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11794 {
11795 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11796 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11797 }
11798 else
11799 {
11800 /* Get the register (or SIB) value. */
11801 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11802 {
11803 case 0: u64EffAddr = pCtx->rax; break;
11804 case 1: u64EffAddr = pCtx->rcx; break;
11805 case 2: u64EffAddr = pCtx->rdx; break;
11806 case 3: u64EffAddr = pCtx->rbx; break;
11807 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11808 case 6: u64EffAddr = pCtx->rsi; break;
11809 case 7: u64EffAddr = pCtx->rdi; break;
11810 case 8: u64EffAddr = pCtx->r8; break;
11811 case 9: u64EffAddr = pCtx->r9; break;
11812 case 10: u64EffAddr = pCtx->r10; break;
11813 case 11: u64EffAddr = pCtx->r11; break;
11814 case 13: u64EffAddr = pCtx->r13; break;
11815 case 14: u64EffAddr = pCtx->r14; break;
11816 case 15: u64EffAddr = pCtx->r15; break;
11817 /* SIB */
11818 case 4:
11819 case 12:
11820 {
11821 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11822
11823 /* Get the index and scale it. */
11824 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11825 {
11826 case 0: u64EffAddr = pCtx->rax; break;
11827 case 1: u64EffAddr = pCtx->rcx; break;
11828 case 2: u64EffAddr = pCtx->rdx; break;
11829 case 3: u64EffAddr = pCtx->rbx; break;
11830 case 4: u64EffAddr = 0; /*none */ break;
11831 case 5: u64EffAddr = pCtx->rbp; break;
11832 case 6: u64EffAddr = pCtx->rsi; break;
11833 case 7: u64EffAddr = pCtx->rdi; break;
11834 case 8: u64EffAddr = pCtx->r8; break;
11835 case 9: u64EffAddr = pCtx->r9; break;
11836 case 10: u64EffAddr = pCtx->r10; break;
11837 case 11: u64EffAddr = pCtx->r11; break;
11838 case 12: u64EffAddr = pCtx->r12; break;
11839 case 13: u64EffAddr = pCtx->r13; break;
11840 case 14: u64EffAddr = pCtx->r14; break;
11841 case 15: u64EffAddr = pCtx->r15; break;
11842 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11843 }
11844 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11845
11846 /* add base */
11847 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11848 {
11849 case 0: u64EffAddr += pCtx->rax; break;
11850 case 1: u64EffAddr += pCtx->rcx; break;
11851 case 2: u64EffAddr += pCtx->rdx; break;
11852 case 3: u64EffAddr += pCtx->rbx; break;
11853 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11854 case 6: u64EffAddr += pCtx->rsi; break;
11855 case 7: u64EffAddr += pCtx->rdi; break;
11856 case 8: u64EffAddr += pCtx->r8; break;
11857 case 9: u64EffAddr += pCtx->r9; break;
11858 case 10: u64EffAddr += pCtx->r10; break;
11859 case 11: u64EffAddr += pCtx->r11; break;
11860 case 12: u64EffAddr += pCtx->r12; break;
11861 case 14: u64EffAddr += pCtx->r14; break;
11862 case 15: u64EffAddr += pCtx->r15; break;
11863 /* complicated encodings */
11864 case 5:
11865 case 13:
11866 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11867 {
11868 if (!pVCpu->iem.s.uRexB)
11869 {
11870 u64EffAddr += pCtx->rbp;
11871 SET_SS_DEF();
11872 }
11873 else
11874 u64EffAddr += pCtx->r13;
11875 }
11876 else
11877 {
11878 uint32_t u32Disp;
11879 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11880 u64EffAddr += (int32_t)u32Disp;
11881 }
11882 break;
11883 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11884 }
11885 break;
11886 }
11887 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11888 }
11889
11890 /* Get and add the displacement. */
11891 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11892 {
11893 case 0:
11894 break;
11895 case 1:
11896 {
11897 int8_t i8Disp;
11898 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11899 u64EffAddr += i8Disp;
11900 break;
11901 }
11902 case 2:
11903 {
11904 uint32_t u32Disp;
11905 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11906 u64EffAddr += (int32_t)u32Disp;
11907 break;
11908 }
11909 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11910 }
11911
11912 }
11913
11914 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11915 *pGCPtrEff = u64EffAddr;
11916 else
11917 {
11918 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11919 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11920 }
11921 }
11922
11923 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11924 return VINF_SUCCESS;
11925}
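/*
 * Worked example, not part of the original source: in 16-bit addressing,
 * bRm = 0x42 decodes as mod=1, rm=2, i.e. [bp+si+disp8].  With a disp8 of
 * 0x10, BP=0x1000 and SI=0x0020 the function returns *pGCPtrEff = 0x1030,
 * and SET_SS_DEF() switches the default segment to SS because BP is involved.
 */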
11926
11927
11928/**
11929 * Calculates the effective address of a ModR/M memory operand.
11930 *
11931 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11932 *
11933 * @return Strict VBox status code.
11934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11935 * @param bRm The ModRM byte.
11936 * @param cbImm The size of any immediate following the
11937 * effective address opcode bytes. Important for
11938 * RIP relative addressing.
11939 * @param pGCPtrEff Where to return the effective address.
11940 * @param offRsp RSP displacement.
11941 */
11942IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11943{
11944 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11945 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11946# define SET_SS_DEF() \
11947 do \
11948 { \
11949 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11950 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11951 } while (0)
11952
11953 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11954 {
11955/** @todo Check the effective address size crap! */
11956 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11957 {
11958 uint16_t u16EffAddr;
11959
11960 /* Handle the disp16 form with no registers first. */
11961 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11962 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11963 else
11964 {
11965                /* Get the displacement. */
11966 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11967 {
11968 case 0: u16EffAddr = 0; break;
11969 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11970 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11971 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11972 }
11973
11974 /* Add the base and index registers to the disp. */
11975 switch (bRm & X86_MODRM_RM_MASK)
11976 {
11977 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11978 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11979 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11980 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11981 case 4: u16EffAddr += pCtx->si; break;
11982 case 5: u16EffAddr += pCtx->di; break;
11983 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11984 case 7: u16EffAddr += pCtx->bx; break;
11985 }
11986 }
11987
11988 *pGCPtrEff = u16EffAddr;
11989 }
11990 else
11991 {
11992 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11993 uint32_t u32EffAddr;
11994
11995 /* Handle the disp32 form with no registers first. */
11996 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11997 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11998 else
11999 {
12000 /* Get the register (or SIB) value. */
12001 switch ((bRm & X86_MODRM_RM_MASK))
12002 {
12003 case 0: u32EffAddr = pCtx->eax; break;
12004 case 1: u32EffAddr = pCtx->ecx; break;
12005 case 2: u32EffAddr = pCtx->edx; break;
12006 case 3: u32EffAddr = pCtx->ebx; break;
12007 case 4: /* SIB */
12008 {
12009 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12010
12011 /* Get the index and scale it. */
12012 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12013 {
12014 case 0: u32EffAddr = pCtx->eax; break;
12015 case 1: u32EffAddr = pCtx->ecx; break;
12016 case 2: u32EffAddr = pCtx->edx; break;
12017 case 3: u32EffAddr = pCtx->ebx; break;
12018 case 4: u32EffAddr = 0; /*none */ break;
12019 case 5: u32EffAddr = pCtx->ebp; break;
12020 case 6: u32EffAddr = pCtx->esi; break;
12021 case 7: u32EffAddr = pCtx->edi; break;
12022 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12023 }
12024 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12025
12026 /* add base */
12027 switch (bSib & X86_SIB_BASE_MASK)
12028 {
12029 case 0: u32EffAddr += pCtx->eax; break;
12030 case 1: u32EffAddr += pCtx->ecx; break;
12031 case 2: u32EffAddr += pCtx->edx; break;
12032 case 3: u32EffAddr += pCtx->ebx; break;
12033 case 4:
12034 u32EffAddr += pCtx->esp + offRsp;
12035 SET_SS_DEF();
12036 break;
12037 case 5:
12038 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12039 {
12040 u32EffAddr += pCtx->ebp;
12041 SET_SS_DEF();
12042 }
12043 else
12044 {
12045 uint32_t u32Disp;
12046 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12047 u32EffAddr += u32Disp;
12048 }
12049 break;
12050 case 6: u32EffAddr += pCtx->esi; break;
12051 case 7: u32EffAddr += pCtx->edi; break;
12052 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12053 }
12054 break;
12055 }
12056 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12057 case 6: u32EffAddr = pCtx->esi; break;
12058 case 7: u32EffAddr = pCtx->edi; break;
12059 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12060 }
12061
12062 /* Get and add the displacement. */
12063 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12064 {
12065 case 0:
12066 break;
12067 case 1:
12068 {
12069 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12070 u32EffAddr += i8Disp;
12071 break;
12072 }
12073 case 2:
12074 {
12075 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12076 u32EffAddr += u32Disp;
12077 break;
12078 }
12079 default:
12080 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12081 }
12082
12083 }
12084 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12085 *pGCPtrEff = u32EffAddr;
12086 else
12087 {
12088 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12089 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12090 }
12091 }
12092 }
12093 else
12094 {
12095 uint64_t u64EffAddr;
12096
12097 /* Handle the rip+disp32 form with no registers first. */
12098 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12099 {
12100 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
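            /* The disp32 is relative to the end of the instruction: RIP plus the
               opcode bytes decoded so far plus any immediate still to come (cbImm). */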
12101 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12102 }
12103 else
12104 {
12105 /* Get the register (or SIB) value. */
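            /* Note: uRexB and uRexIndex hold the REX.B / REX.X bits pre-shifted into
               bit 3, so OR'ing them into the 3-bit ModR/M and SIB register fields
               selects among all 16 general purpose registers. */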
12106 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12107 {
12108 case 0: u64EffAddr = pCtx->rax; break;
12109 case 1: u64EffAddr = pCtx->rcx; break;
12110 case 2: u64EffAddr = pCtx->rdx; break;
12111 case 3: u64EffAddr = pCtx->rbx; break;
12112 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12113 case 6: u64EffAddr = pCtx->rsi; break;
12114 case 7: u64EffAddr = pCtx->rdi; break;
12115 case 8: u64EffAddr = pCtx->r8; break;
12116 case 9: u64EffAddr = pCtx->r9; break;
12117 case 10: u64EffAddr = pCtx->r10; break;
12118 case 11: u64EffAddr = pCtx->r11; break;
12119 case 13: u64EffAddr = pCtx->r13; break;
12120 case 14: u64EffAddr = pCtx->r14; break;
12121 case 15: u64EffAddr = pCtx->r15; break;
12122 /* SIB */
12123 case 4:
12124 case 12:
12125 {
12126 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12127
12128 /* Get the index and scale it. */
12129 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12130 {
12131 case 0: u64EffAddr = pCtx->rax; break;
12132 case 1: u64EffAddr = pCtx->rcx; break;
12133 case 2: u64EffAddr = pCtx->rdx; break;
12134 case 3: u64EffAddr = pCtx->rbx; break;
12135 case 4: u64EffAddr = 0; /*none */ break;
12136 case 5: u64EffAddr = pCtx->rbp; break;
12137 case 6: u64EffAddr = pCtx->rsi; break;
12138 case 7: u64EffAddr = pCtx->rdi; break;
12139 case 8: u64EffAddr = pCtx->r8; break;
12140 case 9: u64EffAddr = pCtx->r9; break;
12141 case 10: u64EffAddr = pCtx->r10; break;
12142 case 11: u64EffAddr = pCtx->r11; break;
12143 case 12: u64EffAddr = pCtx->r12; break;
12144 case 13: u64EffAddr = pCtx->r13; break;
12145 case 14: u64EffAddr = pCtx->r14; break;
12146 case 15: u64EffAddr = pCtx->r15; break;
12147 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12148 }
12149 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12150
12151 /* add base */
12152 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12153 {
12154 case 0: u64EffAddr += pCtx->rax; break;
12155 case 1: u64EffAddr += pCtx->rcx; break;
12156 case 2: u64EffAddr += pCtx->rdx; break;
12157 case 3: u64EffAddr += pCtx->rbx; break;
12158 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12159 case 6: u64EffAddr += pCtx->rsi; break;
12160 case 7: u64EffAddr += pCtx->rdi; break;
12161 case 8: u64EffAddr += pCtx->r8; break;
12162 case 9: u64EffAddr += pCtx->r9; break;
12163 case 10: u64EffAddr += pCtx->r10; break;
12164 case 11: u64EffAddr += pCtx->r11; break;
12165 case 12: u64EffAddr += pCtx->r12; break;
12166 case 14: u64EffAddr += pCtx->r14; break;
12167 case 15: u64EffAddr += pCtx->r15; break;
12168 /* complicated encodings */
12169 case 5:
12170 case 13:
12171 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12172 {
12173 if (!pVCpu->iem.s.uRexB)
12174 {
12175 u64EffAddr += pCtx->rbp;
12176 SET_SS_DEF();
12177 }
12178 else
12179 u64EffAddr += pCtx->r13;
12180 }
12181 else
12182 {
12183 uint32_t u32Disp;
12184 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12185 u64EffAddr += (int32_t)u32Disp;
12186 }
12187 break;
12188 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12189 }
12190 break;
12191 }
12192 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12193 }
12194
12195 /* Get and add the displacement. */
12196 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12197 {
12198 case 0:
12199 break;
12200 case 1:
12201 {
12202 int8_t i8Disp;
12203 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12204 u64EffAddr += i8Disp;
12205 break;
12206 }
12207 case 2:
12208 {
12209 uint32_t u32Disp;
12210 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12211 u64EffAddr += (int32_t)u32Disp;
12212 break;
12213 }
12214 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12215 }
12216
12217 }
12218
12219 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12220 *pGCPtrEff = u64EffAddr;
12221 else
12222 {
12223 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12224 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12225 }
12226 }
12227
12228 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12229 return VINF_SUCCESS;
12230}
12231
12232
12233#ifdef IEM_WITH_SETJMP
12234/**
12235 * Calculates the effective address of a ModR/M memory operand.
12236 *
12237 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12238 *
12239 * May longjmp on internal error.
12240 *
12241 * @return The effective address.
12242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12243 * @param bRm The ModRM byte.
12244 * @param cbImm The size of any immediate following the
12245 * effective address opcode bytes. Important for
12246 * RIP relative addressing.
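 *
 * @remarks Illustrative example (hypothetical byte values): bRm=0x44 means
 *          mod=01, rm=100, so an SIB byte and a disp8 follow; with bSib=0x98
 *          (scale=2, index=EBX, base=EAX) the effective address works out to
 *          EAX + (EBX << 2) + disp8.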
12247 */
12248IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12249{
12250 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12251 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12252# define SET_SS_DEF() \
12253 do \
12254 { \
12255 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12256 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12257 } while (0)
12258
12259 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12260 {
12261/** @todo Check the effective address size crap! */
12262 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12263 {
12264 uint16_t u16EffAddr;
12265
12266 /* Handle the disp16 form with no registers first. */
12267 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12268 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12269 else
12270 {
12271                /* Get the displacement. */
12272 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12273 {
12274 case 0: u16EffAddr = 0; break;
12275 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12276 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12277 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12278 }
12279
12280 /* Add the base and index registers to the disp. */
12281 switch (bRm & X86_MODRM_RM_MASK)
12282 {
12283 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12284 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12285 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12286 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12287 case 4: u16EffAddr += pCtx->si; break;
12288 case 5: u16EffAddr += pCtx->di; break;
12289 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12290 case 7: u16EffAddr += pCtx->bx; break;
12291 }
12292 }
12293
12294 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12295 return u16EffAddr;
12296 }
12297
12298 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12299 uint32_t u32EffAddr;
12300
12301 /* Handle the disp32 form with no registers first. */
12302 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12303 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12304 else
12305 {
12306 /* Get the register (or SIB) value. */
12307 switch ((bRm & X86_MODRM_RM_MASK))
12308 {
12309 case 0: u32EffAddr = pCtx->eax; break;
12310 case 1: u32EffAddr = pCtx->ecx; break;
12311 case 2: u32EffAddr = pCtx->edx; break;
12312 case 3: u32EffAddr = pCtx->ebx; break;
12313 case 4: /* SIB */
12314 {
12315 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12316
12317 /* Get the index and scale it. */
12318 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12319 {
12320 case 0: u32EffAddr = pCtx->eax; break;
12321 case 1: u32EffAddr = pCtx->ecx; break;
12322 case 2: u32EffAddr = pCtx->edx; break;
12323 case 3: u32EffAddr = pCtx->ebx; break;
12324 case 4: u32EffAddr = 0; /*none */ break;
12325 case 5: u32EffAddr = pCtx->ebp; break;
12326 case 6: u32EffAddr = pCtx->esi; break;
12327 case 7: u32EffAddr = pCtx->edi; break;
12328 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12329 }
12330 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12331
12332 /* add base */
12333 switch (bSib & X86_SIB_BASE_MASK)
12334 {
12335 case 0: u32EffAddr += pCtx->eax; break;
12336 case 1: u32EffAddr += pCtx->ecx; break;
12337 case 2: u32EffAddr += pCtx->edx; break;
12338 case 3: u32EffAddr += pCtx->ebx; break;
12339 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12340 case 5:
12341 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12342 {
12343 u32EffAddr += pCtx->ebp;
12344 SET_SS_DEF();
12345 }
12346 else
12347 {
12348 uint32_t u32Disp;
12349 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12350 u32EffAddr += u32Disp;
12351 }
12352 break;
12353 case 6: u32EffAddr += pCtx->esi; break;
12354 case 7: u32EffAddr += pCtx->edi; break;
12355 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12356 }
12357 break;
12358 }
12359 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12360 case 6: u32EffAddr = pCtx->esi; break;
12361 case 7: u32EffAddr = pCtx->edi; break;
12362 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12363 }
12364
12365 /* Get and add the displacement. */
12366 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12367 {
12368 case 0:
12369 break;
12370 case 1:
12371 {
12372 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12373 u32EffAddr += i8Disp;
12374 break;
12375 }
12376 case 2:
12377 {
12378 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12379 u32EffAddr += u32Disp;
12380 break;
12381 }
12382 default:
12383 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12384 }
12385 }
12386
12387 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12388 {
12389 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12390 return u32EffAddr;
12391 }
12392 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12393 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12394 return u32EffAddr & UINT16_MAX;
12395 }
12396
12397 uint64_t u64EffAddr;
12398
12399 /* Handle the rip+disp32 form with no registers first. */
12400 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12401 {
12402 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12403 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12404 }
12405 else
12406 {
12407 /* Get the register (or SIB) value. */
12408 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12409 {
12410 case 0: u64EffAddr = pCtx->rax; break;
12411 case 1: u64EffAddr = pCtx->rcx; break;
12412 case 2: u64EffAddr = pCtx->rdx; break;
12413 case 3: u64EffAddr = pCtx->rbx; break;
12414 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12415 case 6: u64EffAddr = pCtx->rsi; break;
12416 case 7: u64EffAddr = pCtx->rdi; break;
12417 case 8: u64EffAddr = pCtx->r8; break;
12418 case 9: u64EffAddr = pCtx->r9; break;
12419 case 10: u64EffAddr = pCtx->r10; break;
12420 case 11: u64EffAddr = pCtx->r11; break;
12421 case 13: u64EffAddr = pCtx->r13; break;
12422 case 14: u64EffAddr = pCtx->r14; break;
12423 case 15: u64EffAddr = pCtx->r15; break;
12424 /* SIB */
12425 case 4:
12426 case 12:
12427 {
12428 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12429
12430 /* Get the index and scale it. */
12431 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12432 {
12433 case 0: u64EffAddr = pCtx->rax; break;
12434 case 1: u64EffAddr = pCtx->rcx; break;
12435 case 2: u64EffAddr = pCtx->rdx; break;
12436 case 3: u64EffAddr = pCtx->rbx; break;
12437 case 4: u64EffAddr = 0; /*none */ break;
12438 case 5: u64EffAddr = pCtx->rbp; break;
12439 case 6: u64EffAddr = pCtx->rsi; break;
12440 case 7: u64EffAddr = pCtx->rdi; break;
12441 case 8: u64EffAddr = pCtx->r8; break;
12442 case 9: u64EffAddr = pCtx->r9; break;
12443 case 10: u64EffAddr = pCtx->r10; break;
12444 case 11: u64EffAddr = pCtx->r11; break;
12445 case 12: u64EffAddr = pCtx->r12; break;
12446 case 13: u64EffAddr = pCtx->r13; break;
12447 case 14: u64EffAddr = pCtx->r14; break;
12448 case 15: u64EffAddr = pCtx->r15; break;
12449 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12450 }
12451 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12452
12453 /* add base */
12454 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12455 {
12456 case 0: u64EffAddr += pCtx->rax; break;
12457 case 1: u64EffAddr += pCtx->rcx; break;
12458 case 2: u64EffAddr += pCtx->rdx; break;
12459 case 3: u64EffAddr += pCtx->rbx; break;
12460 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12461 case 6: u64EffAddr += pCtx->rsi; break;
12462 case 7: u64EffAddr += pCtx->rdi; break;
12463 case 8: u64EffAddr += pCtx->r8; break;
12464 case 9: u64EffAddr += pCtx->r9; break;
12465 case 10: u64EffAddr += pCtx->r10; break;
12466 case 11: u64EffAddr += pCtx->r11; break;
12467 case 12: u64EffAddr += pCtx->r12; break;
12468 case 14: u64EffAddr += pCtx->r14; break;
12469 case 15: u64EffAddr += pCtx->r15; break;
12470 /* complicated encodings */
12471 case 5:
12472 case 13:
12473 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12474 {
12475 if (!pVCpu->iem.s.uRexB)
12476 {
12477 u64EffAddr += pCtx->rbp;
12478 SET_SS_DEF();
12479 }
12480 else
12481 u64EffAddr += pCtx->r13;
12482 }
12483 else
12484 {
12485 uint32_t u32Disp;
12486 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12487 u64EffAddr += (int32_t)u32Disp;
12488 }
12489 break;
12490 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12491 }
12492 break;
12493 }
12494 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12495 }
12496
12497 /* Get and add the displacement. */
12498 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12499 {
12500 case 0:
12501 break;
12502 case 1:
12503 {
12504 int8_t i8Disp;
12505 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12506 u64EffAddr += i8Disp;
12507 break;
12508 }
12509 case 2:
12510 {
12511 uint32_t u32Disp;
12512 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12513 u64EffAddr += (int32_t)u32Disp;
12514 break;
12515 }
12516 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12517 }
12518
12519 }
12520
12521 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12522 {
12523 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12524 return u64EffAddr;
12525 }
12526 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12527 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12528 return u64EffAddr & UINT32_MAX;
12529}
12530#endif /* IEM_WITH_SETJMP */
12531
12532
12533/** @} */
12534
12535
12536
12537/*
12538 * Include the instructions
12539 */
12540#include "IEMAllInstructions.cpp.h"
12541
12542
12543
12544
12545#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12546
12547/**
12548 * Sets up execution verification mode.
12549 */
12550IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12551{
12553 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12554
12555 /*
12556 * Always note down the address of the current instruction.
12557 */
12558 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12559 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12560
12561 /*
12562 * Enable verification and/or logging.
12563 */
12564    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12565 if ( fNewNoRem
12566 && ( 0
12567#if 0 /* auto enable on first paged protected mode interrupt */
12568 || ( pOrgCtx->eflags.Bits.u1IF
12569 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12570 && TRPMHasTrap(pVCpu)
12571 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12572#endif
12573#if 0
12574           || (   pOrgCtx->cs.Sel == 0x10
12575               && (   pOrgCtx->rip == 0x90119e3e
12576                   || pOrgCtx->rip == 0x901d9810))
12577#endif
12578#if 0 /* Auto enable DSL - FPU stuff. */
12579           || (   pOrgCtx->cs.Sel == 0x10
12580 && (// pOrgCtx->rip == 0xc02ec07f
12581 //|| pOrgCtx->rip == 0xc02ec082
12582 //|| pOrgCtx->rip == 0xc02ec0c9
12583 0
12584 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12585#endif
12586#if 0 /* Auto enable DSL - fstp st0 stuff. */
12587           || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12588#endif
12589#if 0
12590 || pOrgCtx->rip == 0x9022bb3a
12591#endif
12592#if 0
12593 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12594#endif
12595#if 0
12596 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12597 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12598#endif
12599#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12600 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12601 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12602 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12603#endif
12604#if 0 /* NT4SP1 - xadd early boot. */
12605 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12606#endif
12607#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12608 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12609#endif
12610#if 0 /* NT4SP1 - cmpxchg (AMD). */
12611 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12612#endif
12613#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12614 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12615#endif
12616#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12617 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12618
12619#endif
12620#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12621 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12622
12623#endif
12624#if 0 /* NT4SP1 - frstor [ecx] */
12625 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12626#endif
12627#if 0 /* xxxxxx - All long mode code. */
12628 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12629#endif
12630#if 0 /* rep movsq linux 3.7 64-bit boot. */
12631 || (pOrgCtx->rip == 0x0000000000100241)
12632#endif
12633#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12634 || (pOrgCtx->rip == 0x000000000215e240)
12635#endif
12636#if 0 /* DOS's size-overridden iret to v8086. */
12637 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12638#endif
12639 )
12640 )
12641 {
12642 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12643 RTLogFlags(NULL, "enabled");
12644 fNewNoRem = false;
12645 }
12646 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12647 {
12648 pVCpu->iem.s.fNoRem = fNewNoRem;
12649 if (!fNewNoRem)
12650 {
12651 LogAlways(("Enabling verification mode!\n"));
12652 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12653 }
12654 else
12655 LogAlways(("Disabling verification mode!\n"));
12656 }
12657
12658 /*
12659 * Switch state.
12660 */
12661 if (IEM_VERIFICATION_ENABLED(pVCpu))
12662 {
12663 static CPUMCTX s_DebugCtx; /* Ugly! */
12664
12665 s_DebugCtx = *pOrgCtx;
12666 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12667 }
12668
12669 /*
12670 * See if there is an interrupt pending in TRPM and inject it if we can.
12671 */
12672 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12673 if ( pOrgCtx->eflags.Bits.u1IF
12674 && TRPMHasTrap(pVCpu)
12675 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12676 {
12677 uint8_t u8TrapNo;
12678 TRPMEVENT enmType;
12679 RTGCUINT uErrCode;
12680 RTGCPTR uCr2;
12681 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12682 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12683 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12684 TRPMResetTrap(pVCpu);
12685 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12686 }
12687
12688 /*
12689 * Reset the counters.
12690 */
12691 pVCpu->iem.s.cIOReads = 0;
12692 pVCpu->iem.s.cIOWrites = 0;
12693 pVCpu->iem.s.fIgnoreRaxRdx = false;
12694 pVCpu->iem.s.fOverlappingMovs = false;
12695 pVCpu->iem.s.fProblematicMemory = false;
12696 pVCpu->iem.s.fUndefinedEFlags = 0;
12697
12698 if (IEM_VERIFICATION_ENABLED(pVCpu))
12699 {
12700 /*
12701 * Free all verification records.
12702 */
12703 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12704 pVCpu->iem.s.pIemEvtRecHead = NULL;
12705 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12706 do
12707 {
12708 while (pEvtRec)
12709 {
12710 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12711 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12712 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12713 pEvtRec = pNext;
12714 }
12715 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12716 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12717 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12718 } while (pEvtRec);
12719 }
12720}
12721
12722
12723/**
12724 * Allocate an event record.
12725 * @returns Pointer to a record.
12726 */
12727IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12728{
12729 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12730 return NULL;
12731
12732 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12733 if (pEvtRec)
12734 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12735 else
12736 {
12737 if (!pVCpu->iem.s.ppIemEvtRecNext)
12738 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12739
12740 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12741 if (!pEvtRec)
12742 return NULL;
12743 }
12744 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12745 pEvtRec->pNext = NULL;
12746 return pEvtRec;
12747}
12748
12749
12750/**
12751 * IOMMMIORead notification.
12752 */
12753VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12754{
12755 PVMCPU pVCpu = VMMGetCpu(pVM);
12756 if (!pVCpu)
12757 return;
12758 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12759 if (!pEvtRec)
12760 return;
12761 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12762 pEvtRec->u.RamRead.GCPhys = GCPhys;
12763 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12764 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12765 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12766}
12767
12768
12769/**
12770 * IOMMMIOWrite notification.
12771 */
12772VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12773{
12774 PVMCPU pVCpu = VMMGetCpu(pVM);
12775 if (!pVCpu)
12776 return;
12777 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12778 if (!pEvtRec)
12779 return;
12780 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12781 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12782 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12783 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12784 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12785 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12786 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12787 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12788 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12789}
12790
12791
12792/**
12793 * IOMIOPortRead notification.
12794 */
12795VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12796{
12797 PVMCPU pVCpu = VMMGetCpu(pVM);
12798 if (!pVCpu)
12799 return;
12800 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12801 if (!pEvtRec)
12802 return;
12803 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12804 pEvtRec->u.IOPortRead.Port = Port;
12805 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12806 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12807 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12808}
12809
12810/**
12811 * IOMIOPortWrite notification.
12812 */
12813VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12814{
12815 PVMCPU pVCpu = VMMGetCpu(pVM);
12816 if (!pVCpu)
12817 return;
12818 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12819 if (!pEvtRec)
12820 return;
12821 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12822 pEvtRec->u.IOPortWrite.Port = Port;
12823 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12824 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12825 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12826 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12827}
12828
12829
12830VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12831{
12832 PVMCPU pVCpu = VMMGetCpu(pVM);
12833 if (!pVCpu)
12834 return;
12835 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12836 if (!pEvtRec)
12837 return;
12838 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12839 pEvtRec->u.IOPortStrRead.Port = Port;
12840 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12841 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12842 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12843 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12844}
12845
12846
12847VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12848{
12849 PVMCPU pVCpu = VMMGetCpu(pVM);
12850 if (!pVCpu)
12851 return;
12852 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12853 if (!pEvtRec)
12854 return;
12855 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12856 pEvtRec->u.IOPortStrWrite.Port = Port;
12857 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12858 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12859 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12860 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12861}
12862
12863
12864/**
12865 * Fakes and records an I/O port read.
12866 *
12867 * @returns VINF_SUCCESS.
12868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12869 * @param Port The I/O port.
12870 * @param pu32Value Where to store the fake value.
12871 * @param cbValue The size of the access.
12872 */
12873IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12874{
12875 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12876 if (pEvtRec)
12877 {
12878 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12879 pEvtRec->u.IOPortRead.Port = Port;
12880 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12881 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12882 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12883 }
12884 pVCpu->iem.s.cIOReads++;
12885 *pu32Value = 0xcccccccc;
12886 return VINF_SUCCESS;
12887}
12888
12889
12890/**
12891 * Fakes and records an I/O port write.
12892 *
12893 * @returns VINF_SUCCESS.
12894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12895 * @param Port The I/O port.
12896 * @param u32Value The value being written.
12897 * @param cbValue The size of the access.
12898 */
12899IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12900{
12901 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12902 if (pEvtRec)
12903 {
12904 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12905 pEvtRec->u.IOPortWrite.Port = Port;
12906 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12907 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12908 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12909 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12910 }
12911 pVCpu->iem.s.cIOWrites++;
12912 return VINF_SUCCESS;
12913}
12914
12915
12916/**
12917 * Used to add extra details about a stub case.
12918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12919 */
12920IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12921{
12922 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12923 PVM pVM = pVCpu->CTX_SUFF(pVM);
12925 char szRegs[4096];
12926 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12927 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12928 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12929 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12930 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12931 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12932 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12933 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12934 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12935 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12936 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12937 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12938 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12939 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12940 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12941 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12942 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12943 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12944 " efer=%016VR{efer}\n"
12945 " pat=%016VR{pat}\n"
12946 " sf_mask=%016VR{sf_mask}\n"
12947 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12948 " lstar=%016VR{lstar}\n"
12949 " star=%016VR{star} cstar=%016VR{cstar}\n"
12950 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12951 );
12952
12953 char szInstr1[256];
12954 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12955 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12956 szInstr1, sizeof(szInstr1), NULL);
12957 char szInstr2[256];
12958 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12959 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12960 szInstr2, sizeof(szInstr2), NULL);
12961
12962 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12963}
12964
12965
12966/**
12967 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12968 * dump to the assertion info.
12969 *
12970 * @param pEvtRec The record to dump.
12971 */
12972IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12973{
12974 switch (pEvtRec->enmEvent)
12975 {
12976 case IEMVERIFYEVENT_IOPORT_READ:
12977 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12978                             pEvtRec->u.IOPortRead.Port,
12979                             pEvtRec->u.IOPortRead.cbValue);
12980 break;
12981 case IEMVERIFYEVENT_IOPORT_WRITE:
12982 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12983 pEvtRec->u.IOPortWrite.Port,
12984 pEvtRec->u.IOPortWrite.cbValue,
12985 pEvtRec->u.IOPortWrite.u32Value);
12986 break;
12987 case IEMVERIFYEVENT_IOPORT_STR_READ:
12988 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
12989                             pEvtRec->u.IOPortStrRead.Port,
12990                             pEvtRec->u.IOPortStrRead.cbValue,
12991                             pEvtRec->u.IOPortStrRead.cTransfers);
12992 break;
12993 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12994 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12995 pEvtRec->u.IOPortStrWrite.Port,
12996 pEvtRec->u.IOPortStrWrite.cbValue,
12997 pEvtRec->u.IOPortStrWrite.cTransfers);
12998 break;
12999 case IEMVERIFYEVENT_RAM_READ:
13000 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13001 pEvtRec->u.RamRead.GCPhys,
13002 pEvtRec->u.RamRead.cb);
13003 break;
13004 case IEMVERIFYEVENT_RAM_WRITE:
13005 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13006 pEvtRec->u.RamWrite.GCPhys,
13007 pEvtRec->u.RamWrite.cb,
13008 (int)pEvtRec->u.RamWrite.cb,
13009 pEvtRec->u.RamWrite.ab);
13010 break;
13011 default:
13012 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13013 break;
13014 }
13015}
13016
13017
13018/**
13019 * Raises an assertion on the specified records, showing the given message with
13020 * their record dumps attached.
13021 *
13022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13023 * @param pEvtRec1 The first record.
13024 * @param pEvtRec2 The second record.
13025 * @param pszMsg The message explaining why we're asserting.
13026 */
13027IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13028{
13029 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13030 iemVerifyAssertAddRecordDump(pEvtRec1);
13031 iemVerifyAssertAddRecordDump(pEvtRec2);
13032 iemVerifyAssertMsg2(pVCpu);
13033 RTAssertPanic();
13034}
13035
13036
13037/**
13038 * Raises an assertion on the specified record, showing the given message with
13039 * a record dump attached.
13040 *
13041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13042 * @param pEvtRec1 The first record.
13043 * @param   pEvtRec             The record to dump.
13044 */
13045IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13046{
13047 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13048 iemVerifyAssertAddRecordDump(pEvtRec);
13049 iemVerifyAssertMsg2(pVCpu);
13050 RTAssertPanic();
13051}
13052
13053
13054/**
13055 * Verifies a write record.
13056 *
13057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13058 * @param pEvtRec The write record.
13059 * @param   fRem                Set if REM was doing the other execution; clear
13060 *                              if it was HM.
13061 */
13062IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13063{
13064 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13065 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13066 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13067 if ( RT_FAILURE(rc)
13068 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13069 {
13070 /* fend off ins */
13071 if ( !pVCpu->iem.s.cIOReads
13072 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13073 || ( pEvtRec->u.RamWrite.cb != 1
13074 && pEvtRec->u.RamWrite.cb != 2
13075 && pEvtRec->u.RamWrite.cb != 4) )
13076 {
13077 /* fend off ROMs and MMIO */
13078 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13079 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13080 {
13081 /* fend off fxsave */
13082 if (pEvtRec->u.RamWrite.cb != 512)
13083 {
13084 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13085 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13086                    RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
13087 RTAssertMsg2Add("%s: %.*Rhxs\n"
13088 "iem: %.*Rhxs\n",
13089 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13090 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13091 iemVerifyAssertAddRecordDump(pEvtRec);
13092 iemVerifyAssertMsg2(pVCpu);
13093 RTAssertPanic();
13094 }
13095 }
13096 }
13097 }
13098
13099}
13100
13101/**
13102 * Performs the post-execution verification checks.
13103 */
13104IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13105{
13106 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13107 return rcStrictIem;
13108
13109 /*
13110 * Switch back the state.
13111 */
13112 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13113 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13114 Assert(pOrgCtx != pDebugCtx);
13115 IEM_GET_CTX(pVCpu) = pOrgCtx;
13116
13117 /*
13118 * Execute the instruction in REM.
13119 */
13120 bool fRem = false;
13121 PVM pVM = pVCpu->CTX_SUFF(pVM);
13123 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13124#ifdef IEM_VERIFICATION_MODE_FULL_HM
13125 if ( HMIsEnabled(pVM)
13126 && pVCpu->iem.s.cIOReads == 0
13127 && pVCpu->iem.s.cIOWrites == 0
13128 && !pVCpu->iem.s.fProblematicMemory)
13129 {
13130 uint64_t uStartRip = pOrgCtx->rip;
13131 unsigned iLoops = 0;
13132 do
13133 {
13134 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13135 iLoops++;
13136 } while ( rc == VINF_SUCCESS
13137 || ( rc == VINF_EM_DBG_STEPPED
13138 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13139 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13140 || ( pOrgCtx->rip != pDebugCtx->rip
13141 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13142 && iLoops < 8) );
13143 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13144 rc = VINF_SUCCESS;
13145 }
13146#endif
13147 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13148 || rc == VINF_IOM_R3_IOPORT_READ
13149 || rc == VINF_IOM_R3_IOPORT_WRITE
13150 || rc == VINF_IOM_R3_MMIO_READ
13151 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13152 || rc == VINF_IOM_R3_MMIO_WRITE
13153 || rc == VINF_CPUM_R3_MSR_READ
13154 || rc == VINF_CPUM_R3_MSR_WRITE
13155 || rc == VINF_EM_RESCHEDULE
13156 )
13157 {
13158 EMRemLock(pVM);
13159 rc = REMR3EmulateInstruction(pVM, pVCpu);
13160 AssertRC(rc);
13161 EMRemUnlock(pVM);
13162 fRem = true;
13163 }
13164
13165# if 1 /* Skip unimplemented instructions for now. */
13166 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13167 {
13168 IEM_GET_CTX(pVCpu) = pOrgCtx;
13169 if (rc == VINF_EM_DBG_STEPPED)
13170 return VINF_SUCCESS;
13171 return rc;
13172 }
13173# endif
13174
13175 /*
13176 * Compare the register states.
13177 */
13178 unsigned cDiffs = 0;
13179 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13180 {
13181 //Log(("REM and IEM ends up with different registers!\n"));
13182 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13183
13184# define CHECK_FIELD(a_Field) \
13185 do \
13186 { \
13187 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13188 { \
13189 switch (sizeof(pOrgCtx->a_Field)) \
13190 { \
13191 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13192 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13193 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13194 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13195 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13196 } \
13197 cDiffs++; \
13198 } \
13199 } while (0)
13200# define CHECK_XSTATE_FIELD(a_Field) \
13201 do \
13202 { \
13203 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13204 { \
13205 switch (sizeof(pOrgXState->a_Field)) \
13206 { \
13207 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13208 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13209 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13210 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13211 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13212 } \
13213 cDiffs++; \
13214 } \
13215 } while (0)
13216
13217# define CHECK_BIT_FIELD(a_Field) \
13218 do \
13219 { \
13220 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13221 { \
13222 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13223 cDiffs++; \
13224 } \
13225 } while (0)
13226
13227# define CHECK_SEL(a_Sel) \
13228 do \
13229 { \
13230 CHECK_FIELD(a_Sel.Sel); \
13231 CHECK_FIELD(a_Sel.Attr.u); \
13232 CHECK_FIELD(a_Sel.u64Base); \
13233 CHECK_FIELD(a_Sel.u32Limit); \
13234 CHECK_FIELD(a_Sel.fFlags); \
13235 } while (0)
13236
13237 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13238 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13239
13240#if 1 /* The recompiler doesn't update these the intel way. */
13241 if (fRem)
13242 {
13243 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13244 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13245 pOrgXState->x87.CS = pDebugXState->x87.CS;
13246 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13247 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13248 pOrgXState->x87.DS = pDebugXState->x87.DS;
13249 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13250 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13251 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13252 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13253 }
13254#endif
13255 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13256 {
13257 RTAssertMsg2Weak(" the FPU state differs\n");
13258 cDiffs++;
13259 CHECK_XSTATE_FIELD(x87.FCW);
13260 CHECK_XSTATE_FIELD(x87.FSW);
13261 CHECK_XSTATE_FIELD(x87.FTW);
13262 CHECK_XSTATE_FIELD(x87.FOP);
13263 CHECK_XSTATE_FIELD(x87.FPUIP);
13264 CHECK_XSTATE_FIELD(x87.CS);
13265 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13266 CHECK_XSTATE_FIELD(x87.FPUDP);
13267 CHECK_XSTATE_FIELD(x87.DS);
13268 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13269 CHECK_XSTATE_FIELD(x87.MXCSR);
13270 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13271 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13272 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13273 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13274 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13275 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13276 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13277 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13278 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13279 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13280 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13281 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13282 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13283 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13284 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13285 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13286 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13287 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13288 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13289 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13290 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13291 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13292 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13293 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13294 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13295 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13296 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13297 }
13298 CHECK_FIELD(rip);
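        /* Mask out any EFLAGS bits the instruction left architecturally undefined
           (collected in fUndefinedEFlags) so they cannot cause false mismatches. */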
13299 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13300 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13301 {
13302 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13303 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13304 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13305 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13306 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13307 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13308 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13309 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13310 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13311 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13312 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13313 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13314 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13315 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13316 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13317 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13318                if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13319 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13320 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13321 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13322 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13323 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13324 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13325 }
13326
13327 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13328 CHECK_FIELD(rax);
13329 CHECK_FIELD(rcx);
13330 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13331 CHECK_FIELD(rdx);
13332 CHECK_FIELD(rbx);
13333 CHECK_FIELD(rsp);
13334 CHECK_FIELD(rbp);
13335 CHECK_FIELD(rsi);
13336 CHECK_FIELD(rdi);
13337 CHECK_FIELD(r8);
13338 CHECK_FIELD(r9);
13339 CHECK_FIELD(r10);
13340 CHECK_FIELD(r11);
13341 CHECK_FIELD(r12);
13342 CHECK_FIELD(r13);
13343 CHECK_SEL(cs);
13344 CHECK_SEL(ss);
13345 CHECK_SEL(ds);
13346 CHECK_SEL(es);
13347 CHECK_SEL(fs);
13348 CHECK_SEL(gs);
13349 CHECK_FIELD(cr0);
13350
13351        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13352           the faulting instruction first: 001b:77f61ff3 66 8b 42 02   mov ax, word [edx+002h] (NT4SP1) */
13353        /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
13354           while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13355 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13356 {
13357 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13358 { /* ignore */ }
13359 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13360 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13361 && fRem)
13362 { /* ignore */ }
13363 else
13364 CHECK_FIELD(cr2);
13365 }
13366 CHECK_FIELD(cr3);
13367 CHECK_FIELD(cr4);
13368 CHECK_FIELD(dr[0]);
13369 CHECK_FIELD(dr[1]);
13370 CHECK_FIELD(dr[2]);
13371 CHECK_FIELD(dr[3]);
13372 CHECK_FIELD(dr[6]);
13373 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13374 CHECK_FIELD(dr[7]);
13375 CHECK_FIELD(gdtr.cbGdt);
13376 CHECK_FIELD(gdtr.pGdt);
13377 CHECK_FIELD(idtr.cbIdt);
13378 CHECK_FIELD(idtr.pIdt);
13379 CHECK_SEL(ldtr);
13380 CHECK_SEL(tr);
13381 CHECK_FIELD(SysEnter.cs);
13382 CHECK_FIELD(SysEnter.eip);
13383 CHECK_FIELD(SysEnter.esp);
13384 CHECK_FIELD(msrEFER);
13385 CHECK_FIELD(msrSTAR);
13386 CHECK_FIELD(msrPAT);
13387 CHECK_FIELD(msrLSTAR);
13388 CHECK_FIELD(msrCSTAR);
13389 CHECK_FIELD(msrSFMASK);
13390 CHECK_FIELD(msrKERNELGSBASE);
13391
13392 if (cDiffs != 0)
13393 {
13394 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13395 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13396 RTAssertPanic();
13397 static bool volatile s_fEnterDebugger = true;
13398 if (s_fEnterDebugger)
13399 DBGFSTOP(pVM);
13400
13401# if 1 /* Ignore unimplemented instructions for now. */
13402 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13403 rcStrictIem = VINF_SUCCESS;
13404# endif
13405 }
13406# undef CHECK_FIELD
13407# undef CHECK_BIT_FIELD
13408 }
13409
13410 /*
13411 * If the register state compared fine, check the verification event
13412 * records.
13413 */
13414 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13415 {
13416 /*
13417         * Compare verification event records.
13418 * - I/O port accesses should be a 1:1 match.
13419 */
13420 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13421 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13422 while (pIemRec && pOtherRec)
13423 {
13424            /* Since we might miss RAM writes and reads, ignore reads and verify
13425               that any extra IEM write records match what is actually in memory. */
13426 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13427 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13428 && pIemRec->pNext)
13429 {
13430 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13431 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13432 pIemRec = pIemRec->pNext;
13433 }
13434
13435 /* Do the compare. */
13436 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13437 {
13438 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13439 break;
13440 }
13441 bool fEquals;
13442 switch (pIemRec->enmEvent)
13443 {
13444 case IEMVERIFYEVENT_IOPORT_READ:
13445 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13446 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13447 break;
13448 case IEMVERIFYEVENT_IOPORT_WRITE:
13449 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13450 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13451 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13452 break;
13453 case IEMVERIFYEVENT_IOPORT_STR_READ:
13454 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13455 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13456 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13457 break;
13458 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13459 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13460 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13461 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13462 break;
13463 case IEMVERIFYEVENT_RAM_READ:
13464 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13465 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13466 break;
13467 case IEMVERIFYEVENT_RAM_WRITE:
13468 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13469 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13470 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13471 break;
13472 default:
13473 fEquals = false;
13474 break;
13475 }
13476 if (!fEquals)
13477 {
13478 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13479 break;
13480 }
13481
13482 /* advance */
13483 pIemRec = pIemRec->pNext;
13484 pOtherRec = pOtherRec->pNext;
13485 }
13486
13487 /* Ignore extra writes and reads. */
13488 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13489 {
13490 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13491 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13492 pIemRec = pIemRec->pNext;
13493 }
13494 if (pIemRec != NULL)
13495 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13496 else if (pOtherRec != NULL)
13497 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13498 }
13499 IEM_GET_CTX(pVCpu) = pOrgCtx;
13500
13501 return rcStrictIem;
13502}
13503
13504#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13505
13506/* stubs */
13507IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13508{
13509 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13510 return VERR_INTERNAL_ERROR;
13511}
13512
13513IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13514{
13515 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13516 return VERR_INTERNAL_ERROR;
13517}
13518
13519#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13520
13521
13522#ifdef LOG_ENABLED
13523/**
13524 * Logs the current instruction.
13525 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13526 * @param pCtx The current CPU context.
13527 * @param fSameCtx Set if we have the same context information as the VMM,
13528 * clear if we may have already executed an instruction in
13529 * our debug context. When clear, we assume IEMCPU holds
13530 * valid CPU mode info.
13531 */
13532IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13533{
13534# ifdef IN_RING3
13535 if (LogIs2Enabled())
13536 {
13537 char szInstr[256];
13538 uint32_t cbInstr = 0;
13539 if (fSameCtx)
13540 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13541 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13542 szInstr, sizeof(szInstr), &cbInstr);
13543 else
13544 {
13545 uint32_t fFlags = 0;
13546 switch (pVCpu->iem.s.enmCpuMode)
13547 {
13548 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13549 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13550 case IEMMODE_16BIT:
13551 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13552 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13553 else
13554 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13555 break;
13556 }
13557 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13558 szInstr, sizeof(szInstr), &cbInstr);
13559 }
13560
13561 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13562 Log2(("****\n"
13563 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13564 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13565 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13566 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13567 " %s\n"
13568 ,
13569 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13570 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13571 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13572 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13573 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13574 szInstr));
13575
13576 if (LogIs3Enabled())
13577 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13578 }
13579 else
13580# endif
13581 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13582 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13583 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13584}
13585#endif
13586
13587
13588/**
13589 * Makes status code adjustments (pass up from I/O and access handlers)
13590 * as well as maintaining statistics.
13591 *
13592 * @returns Strict VBox status code to pass up.
13593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13594 * @param rcStrict The status from executing an instruction.
13595 */
13596DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13597{
13598 if (rcStrict != VINF_SUCCESS)
13599 {
13600 if (RT_SUCCESS(rcStrict))
13601 {
13602 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13603 || rcStrict == VINF_IOM_R3_IOPORT_READ
13604 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13605 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13606 || rcStrict == VINF_IOM_R3_MMIO_READ
13607 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13608 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13609 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13610 || rcStrict == VINF_CPUM_R3_MSR_READ
13611 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13612 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13613 || rcStrict == VINF_EM_RAW_TO_R3
13614 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13615 /* raw-mode / virt handlers only: */
13616 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13617 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13618 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13619 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13620 || rcStrict == VINF_SELM_SYNC_GDT
13621 || rcStrict == VINF_CSAM_PENDING_ACTION
13622 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13623 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13624/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
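            /* The informational EM statuses are priority ordered (a lower value means
               a higher priority), so the pending pass-up status only replaces rcStrict
               when it falls outside the EM range or ranks higher than rcStrict. */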
13625 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13626 if (rcPassUp == VINF_SUCCESS)
13627 pVCpu->iem.s.cRetInfStatuses++;
13628 else if ( rcPassUp < VINF_EM_FIRST
13629 || rcPassUp > VINF_EM_LAST
13630 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13631 {
13632 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13633 pVCpu->iem.s.cRetPassUpStatus++;
13634 rcStrict = rcPassUp;
13635 }
13636 else
13637 {
13638 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13639 pVCpu->iem.s.cRetInfStatuses++;
13640 }
13641 }
13642 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13643 pVCpu->iem.s.cRetAspectNotImplemented++;
13644 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13645 pVCpu->iem.s.cRetInstrNotImplemented++;
13646#ifdef IEM_VERIFICATION_MODE_FULL
13647 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13648 rcStrict = VINF_SUCCESS;
13649#endif
13650 else
13651 pVCpu->iem.s.cRetErrStatuses++;
13652 }
13653 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13654 {
13655 pVCpu->iem.s.cRetPassUpStatus++;
13656 rcStrict = pVCpu->iem.s.rcPassUp;
13657 }
13658
13659 return rcStrict;
13660}
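
/* Illustrative sketch only (not part of the original source): how a pending
   pass-up status interacts with a plain VINF_SUCCESS result, per the function
   above. The helper name is hypothetical and assumes a pVCpu in scope. */
#if 0
static void iemSampleStatusFiddling(PVMCPU pVCpu) /* hypothetical helper */
{
    pVCpu->iem.s.rcPassUp = VINF_EM_RAW_TO_R3;              /* pending pass-up status */
    VBOXSTRICTRC rcStrict = iemExecStatusCodeFiddling(pVCpu, VINF_SUCCESS);
    Assert(rcStrict == VINF_EM_RAW_TO_R3);                  /* pass-up replaces plain success */
    RT_NOREF(rcStrict);
}
#endif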
13661
13662
13663/**
13664 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13665 * IEMExecOneWithPrefetchedByPC.
13666 *
13667 * Similar code is found in IEMExecLots.
13668 *
13669 * @return Strict VBox status code.
13670 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13672 * @param fExecuteInhibit If set, execute the instruction following CLI,
13673 * POP SS and MOV SS, Gr.
13674 */
13675DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13676{
13677#ifdef IEM_WITH_SETJMP
13678 VBOXSTRICTRC rcStrict;
13679 jmp_buf JmpBuf;
13680 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13681 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13682 if ((rcStrict = setjmp(JmpBuf)) == 0)
13683 {
13684 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13685 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13686 }
13687 else
13688 pVCpu->iem.s.cLongJumps++;
13689 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13690#else
13691 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13692 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13693#endif
13694 if (rcStrict == VINF_SUCCESS)
13695 pVCpu->iem.s.cInstructions++;
13696 if (pVCpu->iem.s.cActiveMappings > 0)
13697 {
13698 Assert(rcStrict != VINF_SUCCESS);
13699 iemMemRollback(pVCpu);
13700 }
13701//#ifdef DEBUG
13702// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13703//#endif
13704
13705 /* Execute the next instruction as well if a cli, pop ss or
13706 mov ss, Gr has just completed successfully. */
13707 if ( fExecuteInhibit
13708 && rcStrict == VINF_SUCCESS
13709 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13710 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13711 {
13712 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13713 if (rcStrict == VINF_SUCCESS)
13714 {
13715#ifdef LOG_ENABLED
13716 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13717#endif
13718#ifdef IEM_WITH_SETJMP
13719 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13720 if ((rcStrict = setjmp(JmpBuf)) == 0)
13721 {
13722 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13723 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13724 }
13725 else
13726 pVCpu->iem.s.cLongJumps++;
13727 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13728#else
13729 IEM_OPCODE_GET_NEXT_U8(&b);
13730 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13731#endif
13732 if (rcStrict == VINF_SUCCESS)
13733 pVCpu->iem.s.cInstructions++;
13734 if (pVCpu->iem.s.cActiveMappings > 0)
13735 {
13736 Assert(rcStrict != VINF_SUCCESS);
13737 iemMemRollback(pVCpu);
13738 }
13739 }
13740 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13741 }
13742
13743 /*
13744 * Return value fiddling, statistics and sanity assertions.
13745 */
13746 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13747
13748 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13749 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13750#if defined(IEM_VERIFICATION_MODE_FULL)
13751 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13752 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13753 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13754 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13755#endif
13756 return rcStrict;
13757}
13758
13759
13760#ifdef IN_RC
13761/**
13762 * Re-enters raw-mode or ensures we return to ring-3.
13763 *
13764 * @returns rcStrict, maybe modified.
13765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13766 * @param pCtx The current CPU context.
13767 * @param rcStrict The status code returned by the interpreter.
13768 */
13769DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13770{
13771 if ( !pVCpu->iem.s.fInPatchCode
13772 && ( rcStrict == VINF_SUCCESS
13773 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13774 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13775 {
13776 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13777 CPUMRawEnter(pVCpu);
13778 else
13779 {
13780 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13781 rcStrict = VINF_EM_RESCHEDULE;
13782 }
13783 }
13784 return rcStrict;
13785}
13786#endif
13787
13788
13789/**
13790 * Execute one instruction.
13791 *
13792 * @return Strict VBox status code.
13793 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13794 */
13795VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13796{
13797#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13798 if (++pVCpu->iem.s.cVerifyDepth == 1)
13799 iemExecVerificationModeSetup(pVCpu);
13800#endif
13801#ifdef LOG_ENABLED
13802 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13803 iemLogCurInstr(pVCpu, pCtx, true);
13804#endif
13805
13806 /*
13807 * Do the decoding and emulation.
13808 */
13809 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13810 if (rcStrict == VINF_SUCCESS)
13811 rcStrict = iemExecOneInner(pVCpu, true);
13812
13813#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13814 /*
13815 * Assert some sanity.
13816 */
13817 if (pVCpu->iem.s.cVerifyDepth == 1)
13818 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13819 pVCpu->iem.s.cVerifyDepth--;
13820#endif
13821#ifdef IN_RC
13822 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13823#endif
13824 if (rcStrict != VINF_SUCCESS)
13825 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13826 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13827 return rcStrict;
13828}
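
/* A minimal caller-side sketch (illustrative only, not part of the original
   source): executing one instruction and flattening the strict status for an
   int-returning caller. The helper name is hypothetical. */
#if 0
static int emR3SampleExecOne(PVMCPU pVCpu) /* hypothetical helper */
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    /* Informational statuses (deferred I/O, MMIO, MSR access, EM scheduling)
       are passed straight up; VBOXSTRICTRC_TODO flattens the type here. */
    return VBOXSTRICTRC_TODO(rcStrict);
}
#endif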
13829
13830
13831VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13832{
13833 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13834 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13835
13836 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13837 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13838 if (rcStrict == VINF_SUCCESS)
13839 {
13840 rcStrict = iemExecOneInner(pVCpu, true);
13841 if (pcbWritten)
13842 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13843 }
13844
13845#ifdef IN_RC
13846 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13847#endif
13848 return rcStrict;
13849}
13850
13851
13852VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13853 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13854{
13855 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13856 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13857
13858 VBOXSTRICTRC rcStrict;
13859 if ( cbOpcodeBytes
13860 && pCtx->rip == OpcodeBytesPC)
13861 {
13862 iemInitDecoder(pVCpu, false);
13863#ifdef IEM_WITH_CODE_TLB
13864 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13865 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13866 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13867 pVCpu->iem.s.offCurInstrStart = 0;
13868 pVCpu->iem.s.offInstrNextByte = 0;
13869#else
13870 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13871 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13872#endif
13873 rcStrict = VINF_SUCCESS;
13874 }
13875 else
13876 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13877 if (rcStrict == VINF_SUCCESS)
13878 {
13879 rcStrict = iemExecOneInner(pVCpu, true);
13880 }
13881
13882#ifdef IN_RC
13883 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13884#endif
13885 return rcStrict;
13886}
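
/* Illustrative sketch only: feeding already-fetched opcode bytes to the
   decoder. The single-byte NOP buffer and its placement at the current RIP
   are hypothetical; the call otherwise uses the API exactly as above. */
#if 0
static VBOXSTRICTRC iemSampleExecPrefetchedNop(PVMCPU pVCpu) /* hypothetical helper */
{
    static uint8_t const s_abNop[] = { 0x90 }; /* single-byte NOP */
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, s_abNop, sizeof(s_abNop));
}
#endif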
13887
13888
13889VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13890{
13891 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13892 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13893
13894 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13895 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13896 if (rcStrict == VINF_SUCCESS)
13897 {
13898 rcStrict = iemExecOneInner(pVCpu, false);
13899 if (pcbWritten)
13900 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13901 }
13902
13903#ifdef IN_RC
13904 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13905#endif
13906 return rcStrict;
13907}
13908
13909
13910VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13911 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13912{
13913 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13914 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13915
13916 VBOXSTRICTRC rcStrict;
13917 if ( cbOpcodeBytes
13918 && pCtx->rip == OpcodeBytesPC)
13919 {
13920 iemInitDecoder(pVCpu, true);
13921#ifdef IEM_WITH_CODE_TLB
13922 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13923 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13924 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13925 pVCpu->iem.s.offCurInstrStart = 0;
13926 pVCpu->iem.s.offInstrNextByte = 0;
13927#else
13928 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13929 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13930#endif
13931 rcStrict = VINF_SUCCESS;
13932 }
13933 else
13934 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13935 if (rcStrict == VINF_SUCCESS)
13936 rcStrict = iemExecOneInner(pVCpu, false);
13937
13938#ifdef IN_RC
13939 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13940#endif
13941 return rcStrict;
13942}
13943
13944
13945/**
13946 * For debugging DISGetParamSize; may come in handy.
13947 *
13948 * @returns Strict VBox status code.
13949 * @param pVCpu The cross context virtual CPU structure of the
13950 * calling EMT.
13951 * @param pCtxCore The context core structure.
13952 * @param OpcodeBytesPC The PC of the opcode bytes.
13953 * @param pvOpcodeBytes Prefetched opcode bytes.
13954 * @param cbOpcodeBytes Number of prefetched bytes.
13955 * @param pcbWritten Where to return the number of bytes written.
13956 * Optional.
13957 */
13958VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13959 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13960 uint32_t *pcbWritten)
13961{
13962 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13963 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13964
13965 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13966 VBOXSTRICTRC rcStrict;
13967 if ( cbOpcodeBytes
13968 && pCtx->rip == OpcodeBytesPC)
13969 {
13970 iemInitDecoder(pVCpu, true);
13971#ifdef IEM_WITH_CODE_TLB
13972 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13973 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13974 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13975 pVCpu->iem.s.offCurInstrStart = 0;
13976 pVCpu->iem.s.offInstrNextByte = 0;
13977#else
13978 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13979 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13980#endif
13981 rcStrict = VINF_SUCCESS;
13982 }
13983 else
13984 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13985 if (rcStrict == VINF_SUCCESS)
13986 {
13987 rcStrict = iemExecOneInner(pVCpu, false);
13988 if (pcbWritten)
13989 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13990 }
13991
13992#ifdef IN_RC
13993 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13994#endif
13995 return rcStrict;
13996}
13997
13998
13999VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14000{
14001 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14002
14003#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14004 /*
14005 * See if there is an interrupt pending in TRPM, inject it if we can.
14006 */
14007 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14008# ifdef IEM_VERIFICATION_MODE_FULL
14009 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14010# endif
14011 if ( pCtx->eflags.Bits.u1IF
14012 && TRPMHasTrap(pVCpu)
14013 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14014 {
14015 uint8_t u8TrapNo;
14016 TRPMEVENT enmType;
14017 RTGCUINT uErrCode;
14018 RTGCPTR uCr2;
14019 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14020 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14021 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14022 TRPMResetTrap(pVCpu);
14023 }
14024
14025 /*
14026 * Log the state.
14027 */
14028# ifdef LOG_ENABLED
14029 iemLogCurInstr(pVCpu, pCtx, true);
14030# endif
14031
14032 /*
14033 * Do the decoding and emulation.
14034 */
14035 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14036 if (rcStrict == VINF_SUCCESS)
14037 rcStrict = iemExecOneInner(pVCpu, true);
14038
14039 /*
14040 * Assert some sanity.
14041 */
14042 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14043
14044 /*
14045 * Log and return.
14046 */
14047 if (rcStrict != VINF_SUCCESS)
14048 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14049 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14050 if (pcInstructions)
14051 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14052 return rcStrict;
14053
14054#else /* Not verification mode */
14055
14056 /*
14057 * See if there is an interrupt pending in TRPM, inject it if we can.
14058 */
14059 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14060# ifdef IEM_VERIFICATION_MODE_FULL
14061 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14062# endif
14063 if ( pCtx->eflags.Bits.u1IF
14064 && TRPMHasTrap(pVCpu)
14065 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14066 {
14067 uint8_t u8TrapNo;
14068 TRPMEVENT enmType;
14069 RTGCUINT uErrCode;
14070 RTGCPTR uCr2;
14071 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14072 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14073 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14074 TRPMResetTrap(pVCpu);
14075 }
14076
14077 /*
14078 * Initial decoder init w/ prefetch, then setup setjmp.
14079 */
14080 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14081 if (rcStrict == VINF_SUCCESS)
14082 {
14083# ifdef IEM_WITH_SETJMP
14084 jmp_buf JmpBuf;
14085 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14086 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14087 pVCpu->iem.s.cActiveMappings = 0;
14088 if ((rcStrict = setjmp(JmpBuf)) == 0)
14089# endif
14090 {
14091 /*
14092 * The run loop. We limit ourselves to 4096 instructions right now.
14093 */
14094 PVM pVM = pVCpu->CTX_SUFF(pVM);
14095 uint32_t cInstr = 4096;
14096 for (;;)
14097 {
14098 /*
14099 * Log the state.
14100 */
14101# ifdef LOG_ENABLED
14102 iemLogCurInstr(pVCpu, pCtx, true);
14103# endif
14104
14105 /*
14106 * Do the decoding and emulation.
14107 */
14108 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14109 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14110 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14111 {
14112 Assert(pVCpu->iem.s.cActiveMappings == 0);
14113 pVCpu->iem.s.cInstructions++;
14114 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14115 {
14116 uint32_t fCpu = pVCpu->fLocalForcedActions
14117 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14118 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14119 | VMCPU_FF_TLB_FLUSH
14120# ifdef VBOX_WITH_RAW_MODE
14121 | VMCPU_FF_TRPM_SYNC_IDT
14122 | VMCPU_FF_SELM_SYNC_TSS
14123 | VMCPU_FF_SELM_SYNC_GDT
14124 | VMCPU_FF_SELM_SYNC_LDT
14125# endif
14126 | VMCPU_FF_INHIBIT_INTERRUPTS
14127 | VMCPU_FF_BLOCK_NMIS ));
14128
14129 if (RT_LIKELY( ( !fCpu
14130 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14131 && !pCtx->rflags.Bits.u1IF) )
14132 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14133 {
14134 if (cInstr-- > 0)
14135 {
14136 Assert(pVCpu->iem.s.cActiveMappings == 0);
14137 iemReInitDecoder(pVCpu);
14138 continue;
14139 }
14140 }
14141 }
14142 Assert(pVCpu->iem.s.cActiveMappings == 0);
14143 }
14144 else if (pVCpu->iem.s.cActiveMappings > 0)
14145 iemMemRollback(pVCpu);
14146 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14147 break;
14148 }
14149 }
14150# ifdef IEM_WITH_SETJMP
14151 else
14152 {
14153 if (pVCpu->iem.s.cActiveMappings > 0)
14154 iemMemRollback(pVCpu);
14155 pVCpu->iem.s.cLongJumps++;
14156 }
14157 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14158# endif
14159
14160 /*
14161 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14162 */
14163 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14164 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14165# if defined(IEM_VERIFICATION_MODE_FULL)
14166 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14167 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14168 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14170# endif
14171 }
14172
14173 /*
14174 * Maybe re-enter raw-mode and log.
14175 */
14176# ifdef IN_RC
14177 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14178# endif
14179 if (rcStrict != VINF_SUCCESS)
14180 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14181 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14182 if (pcInstructions)
14183 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14184 return rcStrict;
14185#endif /* Not verification mode */
14186}
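
/* Illustrative caller-side sketch only: running a batch of instructions and
   logging how many were retired. The helper name is hypothetical. */
#if 0
static VBOXSTRICTRC emR3SampleExecLots(PVMCPU pVCpu) /* hypothetical helper */
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("emR3SampleExecLots: %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif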
14187
14188
14189
14190/**
14191 * Injects a trap, fault, abort, software interrupt or external interrupt.
14192 *
14193 * The parameter list matches TRPMQueryTrapAll pretty closely.
14194 *
14195 * @returns Strict VBox status code.
14196 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14197 * @param u8TrapNo The trap number.
14198 * @param enmType What type is it (trap/fault/abort), software
14199 * interrupt or hardware interrupt.
14200 * @param uErrCode The error code if applicable.
14201 * @param uCr2 The CR2 value if applicable.
14202 * @param cbInstr The instruction length (only relevant for
14203 * software interrupts).
14204 */
14205VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14206 uint8_t cbInstr)
14207{
14208 iemInitDecoder(pVCpu, false);
14209#ifdef DBGFTRACE_ENABLED
14210 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14211 u8TrapNo, enmType, uErrCode, uCr2);
14212#endif
14213
14214 uint32_t fFlags;
14215 switch (enmType)
14216 {
14217 case TRPM_HARDWARE_INT:
14218 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14219 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14220 uErrCode = uCr2 = 0;
14221 break;
14222
14223 case TRPM_SOFTWARE_INT:
14224 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14225 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14226 uErrCode = uCr2 = 0;
14227 break;
14228
14229 case TRPM_TRAP:
14230 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14231 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14232 if (u8TrapNo == X86_XCPT_PF)
14233 fFlags |= IEM_XCPT_FLAGS_CR2;
14234 switch (u8TrapNo)
14235 {
14236 case X86_XCPT_DF:
14237 case X86_XCPT_TS:
14238 case X86_XCPT_NP:
14239 case X86_XCPT_SS:
14240 case X86_XCPT_PF:
14241 case X86_XCPT_AC:
14242 fFlags |= IEM_XCPT_FLAGS_ERR;
14243 break;
14244
14245 case X86_XCPT_NMI:
14246 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14247 break;
14248 }
14249 break;
14250
14251 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14252 }
14253
14254 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14255}
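
/* Illustrative sketch only: injecting a #PF with a hypothetical error code and
   faulting address. Per the switch above, TRPM_TRAP + X86_XCPT_PF sets both
   IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2. */
#if 0
static VBOXSTRICTRC iemSampleInjectPageFault(PVMCPU pVCpu) /* hypothetical helper */
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                         X86_TRAP_PF_RW /* write to a not-present page */,
                         UINT64_C(0x1000) /* hypothetical faulting address */,
                         0 /* cbInstr - not used for CPU exceptions */);
}
#endif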
14256
14257
14258/**
14259 * Injects the active TRPM event.
14260 *
14261 * @returns Strict VBox status code.
14262 * @param pVCpu The cross context virtual CPU structure.
14263 */
14264VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14265{
14266#ifndef IEM_IMPLEMENTS_TASKSWITCH
14267 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14268#else
14269 uint8_t u8TrapNo;
14270 TRPMEVENT enmType;
14271 RTGCUINT uErrCode;
14272 RTGCUINTPTR uCr2;
14273 uint8_t cbInstr;
14274 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14275 if (RT_FAILURE(rc))
14276 return rc;
14277
14278 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14279
14280 /** @todo Are there any other codes that imply the event was successfully
14281 * delivered to the guest? See @bugref{6607}. */
14282 if ( rcStrict == VINF_SUCCESS
14283 || rcStrict == VINF_IEM_RAISED_XCPT)
14284 {
14285 TRPMResetTrap(pVCpu);
14286 }
14287 return rcStrict;
14288#endif
14289}
14290
14291
14292VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14293{
14294 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14295 return VERR_NOT_IMPLEMENTED;
14296}
14297
14298
14299VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14300{
14301 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14302 return VERR_NOT_IMPLEMENTED;
14303}
14304
14305
14306#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14307/**
14308 * Executes an IRET instruction with default operand size.
14309 *
14310 * This is for PATM.
14311 *
14312 * @returns VBox status code.
14313 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14314 * @param pCtxCore The register frame.
14315 */
14316VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14317{
14318 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14319
14320 iemCtxCoreToCtx(pCtx, pCtxCore);
14321 iemInitDecoder(pVCpu);
14322 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14323 if (rcStrict == VINF_SUCCESS)
14324 iemCtxToCtxCore(pCtxCore, pCtx);
14325 else
14326 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14327 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14328 return rcStrict;
14329}
14330#endif
14331
14332
14333/**
14334 * Macro used by the IEMExec* methods to check the given instruction length.
14335 *
14336 * Will return on failure!
14337 *
14338 * @param a_cbInstr The given instruction length.
14339 * @param a_cbMin The minimum length.
14340 */
14341#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14342 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14343 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14344
14345
14346/**
14347 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14348 *
14349 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14350 *
14351 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14353 * @param rcStrict The status code to fiddle.
14354 */
14355DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14356{
14357 iemUninitExec(pVCpu);
14358#ifdef IN_RC
14359 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14360 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14361#else
14362 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14363#endif
14364}
14365
14366
14367/**
14368 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14369 *
14370 * This API ASSUMES that the caller has already verified that the guest code is
14371 * allowed to access the I/O port. (The I/O port is in the DX register in the
14372 * guest state.)
14373 *
14374 * @returns Strict VBox status code.
14375 * @param pVCpu The cross context virtual CPU structure.
14376 * @param cbValue The size of the I/O port access (1, 2, or 4).
14377 * @param enmAddrMode The addressing mode.
14378 * @param fRepPrefix Indicates whether a repeat prefix is used
14379 * (doesn't matter which for this instruction).
14380 * @param cbInstr The instruction length in bytes.
14381 * @param iEffSeg The effective segment register number (source segment).
14382 * @param fIoChecked Whether the access to the I/O port has been
14383 * checked or not. It's typically checked in the
14384 * HM scenario.
14385 */
14386VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14387 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14388{
14389 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14390 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14391
14392 /*
14393 * State init.
14394 */
14395 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14396
14397 /*
14398 * Switch orgy for getting to the right handler.
14399 */
14400 VBOXSTRICTRC rcStrict;
14401 if (fRepPrefix)
14402 {
14403 switch (enmAddrMode)
14404 {
14405 case IEMMODE_16BIT:
14406 switch (cbValue)
14407 {
14408 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14409 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14410 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14411 default:
14412 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14413 }
14414 break;
14415
14416 case IEMMODE_32BIT:
14417 switch (cbValue)
14418 {
14419 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14420 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14421 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14422 default:
14423 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14424 }
14425 break;
14426
14427 case IEMMODE_64BIT:
14428 switch (cbValue)
14429 {
14430 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14431 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14432 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14433 default:
14434 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14435 }
14436 break;
14437
14438 default:
14439 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14440 }
14441 }
14442 else
14443 {
14444 switch (enmAddrMode)
14445 {
14446 case IEMMODE_16BIT:
14447 switch (cbValue)
14448 {
14449 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14450 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14451 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14452 default:
14453 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14454 }
14455 break;
14456
14457 case IEMMODE_32BIT:
14458 switch (cbValue)
14459 {
14460 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14461 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14462 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14463 default:
14464 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14465 }
14466 break;
14467
14468 case IEMMODE_64BIT:
14469 switch (cbValue)
14470 {
14471 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14472 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14473 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14474 default:
14475 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14476 }
14477 break;
14478
14479 default:
14480 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14481 }
14482 }
14483
14484 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14485}
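
/* Illustrative sketch only: a REP OUTSB from DS:SI with 16-bit addressing.
   The instruction length of 2 corresponds to the F3 6E encoding; whether the
   I/O port was pre-checked depends on the caller (hypothetical 'false' here). */
#if 0
static VBOXSTRICTRC iemSampleRepOutsb(PVMCPU pVCpu) /* hypothetical helper */
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                2 /*cbInstr: F3 6E*/, X86_SREG_DS, false /*fIoChecked*/);
}
#endif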
14486
14487
14488/**
14489 * Interface for HM and EM for executing string I/O IN (read) instructions.
14490 *
14491 * This API ASSUMES that the caller has already verified that the guest code is
14492 * allowed to access the I/O port. (The I/O port is in the DX register in the
14493 * guest state.)
14494 *
14495 * @returns Strict VBox status code.
14496 * @param pVCpu The cross context virtual CPU structure.
14497 * @param cbValue The size of the I/O port access (1, 2, or 4).
14498 * @param enmAddrMode The addressing mode.
14499 * @param fRepPrefix Indicates whether a repeat prefix is used
14500 * (doesn't matter which for this instruction).
14501 * @param cbInstr The instruction length in bytes.
14502 * @param fIoChecked Whether the access to the I/O port has been
14503 * checked or not. It's typically checked in the
14504 * HM scenario.
14505 */
14506VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14507 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14508{
14509 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14510
14511 /*
14512 * State init.
14513 */
14514 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14515
14516 /*
14517 * Switch orgy for getting to the right handler.
14518 */
14519 VBOXSTRICTRC rcStrict;
14520 if (fRepPrefix)
14521 {
14522 switch (enmAddrMode)
14523 {
14524 case IEMMODE_16BIT:
14525 switch (cbValue)
14526 {
14527 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14528 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14529 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14530 default:
14531 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14532 }
14533 break;
14534
14535 case IEMMODE_32BIT:
14536 switch (cbValue)
14537 {
14538 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14539 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14540 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14541 default:
14542 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14543 }
14544 break;
14545
14546 case IEMMODE_64BIT:
14547 switch (cbValue)
14548 {
14549 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14550 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14551 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14552 default:
14553 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14554 }
14555 break;
14556
14557 default:
14558 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14559 }
14560 }
14561 else
14562 {
14563 switch (enmAddrMode)
14564 {
14565 case IEMMODE_16BIT:
14566 switch (cbValue)
14567 {
14568 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14569 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14570 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14571 default:
14572 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14573 }
14574 break;
14575
14576 case IEMMODE_32BIT:
14577 switch (cbValue)
14578 {
14579 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14580 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14581 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14582 default:
14583 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14584 }
14585 break;
14586
14587 case IEMMODE_64BIT:
14588 switch (cbValue)
14589 {
14590 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14591 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14592 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14593 default:
14594 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14595 }
14596 break;
14597
14598 default:
14599 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14600 }
14601 }
14602
14603 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14604}
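
/* Illustrative sketch only: the read-side counterpart, a REP INSB to ES:DI
   with 16-bit addressing (F3 6C, hence cbInstr=2). */
#if 0
static VBOXSTRICTRC iemSampleRepInsb(PVMCPU pVCpu) /* hypothetical helper */
{
    return IEMExecStringIoRead(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                               2 /*cbInstr: F3 6C*/, false /*fIoChecked*/);
}
#endif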
14605
14606
14607/**
14608 * Interface for rawmode to execute an OUT instruction.
14609 *
14610 * @returns Strict VBox status code.
14611 * @param pVCpu The cross context virtual CPU structure.
14612 * @param cbInstr The instruction length in bytes.
14613 * @param u16Port The port to write to.
14614 * @param cbReg The register size.
14615 *
14616 * @remarks In ring-0 not all of the state needs to be synced in.
14617 */
14618VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14619{
14620 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14621 Assert(cbReg <= 4 && cbReg != 3);
14622
14623 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14624 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14625 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14626}
14627
14628
14629/**
14630 * Interface for rawmode to execute an IN instruction.
14631 *
14632 * @returns Strict VBox status code.
14633 * @param pVCpu The cross context virtual CPU structure.
14634 * @param cbInstr The instruction length in bytes.
14635 * @param u16Port The port to read.
14636 * @param cbReg The register size.
14637 */
14638VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14639{
14640 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14641 Assert(cbReg <= 4 && cbReg != 3);
14642
14643 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14644 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14645 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14646}
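
/* Illustrative sketch only: the single-byte forms OUT DX,AL (EE) and IN AL,DX
   (EC) are both one byte long, hence cbInstr=1 and cbReg=1. The port value is
   hypothetical. */
#if 0
static void iemSamplePortIo(PVMCPU pVCpu) /* hypothetical helper */
{
    VBOXSTRICTRC rcOut = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, 0x80 /*u16Port*/, 1 /*cbReg*/);
    VBOXSTRICTRC rcIn  = IEMExecDecodedIn( pVCpu, 1 /*cbInstr*/, 0x80 /*u16Port*/, 1 /*cbReg*/);
    RT_NOREF2(rcOut, rcIn);
}
#endif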
14647
14648
14649/**
14650 * Interface for HM and EM to write to a CRx register.
14651 *
14652 * @returns Strict VBox status code.
14653 * @param pVCpu The cross context virtual CPU structure.
14654 * @param cbInstr The instruction length in bytes.
14655 * @param iCrReg The control register number (destination).
14656 * @param iGReg The general purpose register number (source).
14657 *
14658 * @remarks In ring-0 not all of the state needs to be synced in.
14659 */
14660VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14661{
14662 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14663 Assert(iCrReg < 16);
14664 Assert(iGReg < 16);
14665
14666 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14667 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14668 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14669}
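
/* Illustrative sketch only: emulating MOV CR0,EAX (0F 22 C0, i.e. 3 bytes).
   Register indexes follow the usual encoding (CR0=0, EAX=X86_GREG_xAX). */
#if 0
static VBOXSTRICTRC iemSampleMovToCr0(PVMCPU pVCpu) /* hypothetical helper */
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr: 0F 22 C0*/, 0 /*iCrReg=CR0*/, X86_GREG_xAX);
}
#endif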
14670
14671
14672/**
14673 * Interface for HM and EM to read from a CRx register.
14674 *
14675 * @returns Strict VBox status code.
14676 * @param pVCpu The cross context virtual CPU structure.
14677 * @param cbInstr The instruction length in bytes.
14678 * @param iGReg The general purpose register number (destination).
14679 * @param iCrReg The control register number (source).
14680 *
14681 * @remarks In ring-0 not all of the state needs to be synced in.
14682 */
14683VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14684{
14685 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14686 Assert(iCrReg < 16);
14687 Assert(iGReg < 16);
14688
14689 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14690 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14691 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14692}
14693
14694
14695/**
14696 * Interface for HM and EM to clear the CR0[TS] bit.
14697 *
14698 * @returns Strict VBox status code.
14699 * @param pVCpu The cross context virtual CPU structure.
14700 * @param cbInstr The instruction length in bytes.
14701 *
14702 * @remarks In ring-0 not all of the state needs to be synced in.
14703 */
14704VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14705{
14706 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14707
14708 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14709 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14710 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14711}
14712
14713
14714/**
14715 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14716 *
14717 * @returns Strict VBox status code.
14718 * @param pVCpu The cross context virtual CPU structure.
14719 * @param cbInstr The instruction length in bytes.
14720 * @param uValue The value to load into CR0.
14721 *
14722 * @remarks In ring-0 not all of the state needs to be synced in.
14723 */
14724VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14725{
14726 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14727
14728 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14729 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14730 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14731}
14732
14733
14734/**
14735 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14736 *
14737 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14738 *
14739 * @returns Strict VBox status code.
14740 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14741 * @param cbInstr The instruction length in bytes.
14742 * @remarks In ring-0 not all of the state needs to be synced in.
14743 * @thread EMT(pVCpu)
14744 */
14745VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14746{
14747 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14748
14749 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14750 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14751 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14752}
14753
14754#ifdef IN_RING3
14755
14756/**
14757 * Handles the unlikely and probably fatal merge cases.
14758 *
14759 * @returns Merged status code.
14760 * @param rcStrict Current EM status code.
14761 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14762 * with @a rcStrict.
14763 * @param iMemMap The memory mapping index. For error reporting only.
14764 * @param pVCpu The cross context virtual CPU structure of the calling
14765 * thread, for error reporting only.
14766 */
14767DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14768 unsigned iMemMap, PVMCPU pVCpu)
14769{
14770 if (RT_FAILURE_NP(rcStrict))
14771 return rcStrict;
14772
14773 if (RT_FAILURE_NP(rcStrictCommit))
14774 return rcStrictCommit;
14775
14776 if (rcStrict == rcStrictCommit)
14777 return rcStrictCommit;
14778
14779 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14780 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14781 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14782 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14783 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14784 return VERR_IOM_FF_STATUS_IPE;
14785}
14786
14787
14788/**
14789 * Helper for IOMR3ProcessForceFlag.
14790 *
14791 * @returns Merged status code.
14792 * @param rcStrict Current EM status code.
14793 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14794 * with @a rcStrict.
14795 * @param iMemMap The memory mapping index. For error reporting only.
14796 * @param pVCpu The cross context virtual CPU structure of the calling
14797 * thread, for error reporting only.
14798 */
14799DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14800{
14801 /* Simple. */
14802 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14803 return rcStrictCommit;
14804
14805 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14806 return rcStrict;
14807
14808 /* EM scheduling status codes. */
14809 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14810 && rcStrict <= VINF_EM_LAST))
14811 {
14812 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14813 && rcStrictCommit <= VINF_EM_LAST))
14814 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14815 }
14816
14817 /* Unlikely */
14818 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14819}
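
/* Illustrative sketch only: per the fast paths above, a plain-success (or
   raw-to-ring-3) current status yields the commit status, while a successful
   commit keeps the current status. The helper name is hypothetical. */
#if 0
static void iemR3SampleMergeStatus(PVMCPU pVCpu) /* hypothetical helper */
{
    Assert(iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RAW_TO_R3, 0 /*iMemMap*/, pVCpu) == VINF_EM_RAW_TO_R3);
    Assert(iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS,      0 /*iMemMap*/, pVCpu) == VINF_EM_HALT);
    RT_NOREF(pVCpu);
}
#endif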
14820
14821
14822/**
14823 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14824 *
14825 * @returns Merge between @a rcStrict and what the commit operation returned.
14826 * @param pVM The cross context VM structure.
14827 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14828 * @param rcStrict The status code returned by ring-0 or raw-mode.
14829 */
14830VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14831{
14832 /*
14833 * Reset the pending commit.
14834 */
14835 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14836 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14837 ("%#x %#x %#x\n",
14838 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14839 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14840
14841 /*
14842 * Commit the pending bounce buffers (usually just one).
14843 */
14844 unsigned cBufs = 0;
14845 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14846 while (iMemMap-- > 0)
14847 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14848 {
14849 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14850 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14851 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14852
14853 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14854 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14855 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14856
14857 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14858 {
14859 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14860 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14861 pbBuf,
14862 cbFirst,
14863 PGMACCESSORIGIN_IEM);
14864 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14865 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14866 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14867 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14868 }
14869
14870 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14871 {
14872 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14874 pbBuf + cbFirst,
14875 cbSecond,
14876 PGMACCESSORIGIN_IEM);
14877 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14878 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14879 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14880 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14881 }
14882 cBufs++;
14883 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14884 }
14885
14886 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14887 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14888 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14889 pVCpu->iem.s.cActiveMappings = 0;
14890 return rcStrict;
14891}
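
/* Illustrative caller-side sketch only: ring-3 force-flag processing as it
   might look in an EM loop (placement and helper name are hypothetical). */
#if 0
static VBOXSTRICTRC emR3SampleProcessIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict) /* hypothetical */
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif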
14892
14893#endif /* IN_RING3 */
14894