VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@64655

Last change on this file since 64655 was 64655, checked in by vboxsync, 8 years ago

VMM,recompiler: Get rid of PDM APIC interfaces reducing one level of indirection, cleaned up some unused stuff in recompiler.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 573.8 KB
1/* $Id: IEMAll.cpp 64655 2016-11-14 10:46:07Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much a work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
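
/*
 * A minimal illustration of how the levels above are typically used; the
 * statements and locals (pCtx, GCPtrMem, cbMem, pvMem) below are examples only
 * and are not compiled.
 */
#if 0
Log(("iemExample: raising #GP(0) at %04x:%RX64\n", pCtx->cs.Sel, pCtx->rip)); /* Level 1: major event. */
LogFlow(("iemExample: enter\n"));                                              /* Flow: basic enter/exit info. */
Log4(("decode - %04x:%RX64: xor eax,eax\n", pCtx->cs.Sel, pCtx->rip));          /* Level 4: decoded mnemonic w/ EIP. */
Log8(("IEM WR %RGv: %.*Rhxs\n", GCPtrMem, (int)cbMem, pvMem));                  /* Level 8: memory writes. */
#endif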
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#ifdef IEM_VERIFICATION_MODE_FULL
115# include <VBox/vmm/rem.h>
116# include <VBox/vmm/mm.h>
117#endif
118#include <VBox/vmm/vm.h>
119#include <VBox/log.h>
120#include <VBox/err.h>
121#include <VBox/param.h>
122#include <VBox/dis.h>
123#include <VBox/disopcode.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
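
/*
 * A minimal illustrative sketch of how FNIEMOP_DEF and FNIEMOP_CALL pair up;
 * the decoder name iemOp_ExampleNop and the local bOpcode are hypothetical and
 * the block is not compiled.
 */
#if 0
/** Example decoder: decodes nothing further and reports success. */
FNIEMOP_DEF(iemOp_ExampleNop)
{
    RT_NOREF(pVCpu); /* pVCpu is the parameter implicitly declared by FNIEMOP_DEF. */
    return VINF_SUCCESS;
}

/* Dispatch via the one-byte opcode map (pVCpu must be in scope, see FNIEMOP_CALL): */
VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
#endif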
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
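
/*
 * A small illustrative sketch (not compiled) of using the two views; uSel is
 * hypothetical and the field/macro names are assumed from x86.h.
 */
#if 0
IEMSELDESC Desc;
/* ... filled in by iemMemFetchSelDesc ... */
if (!Desc.Legacy.Gen.u1Present) /* The present bit sits in the same place in both views. */
    return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
uint64_t const u64Base = X86DESC_BASE(&Desc.Legacy); /* Base address taken from the legacy view. */
#endif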
214
215
216/*********************************************************************************************************************************
217* Defined Constants And Macros *
218*********************************************************************************************************************************/
219/** @def IEM_WITH_SETJMP
220 * Enables alternative status code handling using setjmps.
221 *
222 * This adds a bit of expense via the setjmp() call since it saves all the
223 * non-volatile registers. However, it eliminates return code checks and allows
224 * for more optimal return value passing (return regs instead of stack buffer).
225 */
226#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
227# define IEM_WITH_SETJMP
228#endif
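
/*
 * A rough, hypothetical sketch (not compiled) of the two status handling
 * styles: with IEM_WITH_SETJMP the iemRaiseXxxJmp helpers longjmp back to a
 * setjmp established by the outer executor instead of propagating a
 * VBOXSTRICTRC through every return path.
 */
#if 0
# ifdef IEM_WITH_SETJMP
jmp_buf JmpBuf;               /* <setjmp.h> assumed to be available. */
int rcJmp = setjmp(JmpBuf);
if (rcJmp == 0)
{
    /* ... decode and execute; an iemRaiseXxxJmp helper longjmps out on exceptions ... */
}
else
{
    /* rcJmp holds the status code that was passed to longjmp. */
}
# else
VBOXSTRICTRC rcStrict = iemSomeWorker(pVCpu); /* iemSomeWorker is hypothetical; every call site checks the return code. */
# endif
#endif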
229
230/** Temporary hack to disable the double execution. Will be removed in favor
231 * of a dedicated execution mode in EM. */
232//#define IEM_VERIFICATION_MODE_NO_REM
233
234/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
235 * due to GCC lacking knowledge about the value range of a switch. */
236#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
237
238/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
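
/*
 * A minimal illustrative sketch (not compiled) of the default-case helpers in a
 * fully enumerated switch; the iemDoSomethingNN workers are hypothetical.
 */
#if 0
switch (pVCpu->iem.s.enmEffOpSize)
{
    case IEMMODE_16BIT: return iemDoSomething16(pVCpu);
    case IEMMODE_32BIT: return iemDoSomething32(pVCpu);
    case IEMMODE_64BIT: return iemDoSomething64(pVCpu);
    IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* Quiets GCC; asserts and returns VERR_IPE_NOT_REACHED_DEFAULT_CASE. */
}
#endif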
240
241/**
242 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
243 * occasion.
244 */
245#ifdef LOG_ENABLED
246# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
247 do { \
248 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
249 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
250 } while (0)
251#else
252# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
254#endif
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion using the supplied logger statement.
259 *
260 * @param a_LoggerArgs What to log on failure.
261 */
262#ifdef LOG_ENABLED
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 do { \
265 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
266 /*LogFunc(a_LoggerArgs);*/ \
267 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
268 } while (0)
269#else
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
272#endif
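
/*
 * A minimal illustrative usage sketch (not compiled); fSomeUnhandledAspect and
 * fFlags are hypothetical.
 */
#if 0
if (fSomeUnhandledAspect)
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unhandled combination: fFlags=%#x\n", fFlags));
#endif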
273
274/**
275 * Call an opcode decoder function.
276 *
277 * We're using macros for this so that adding and removing parameters can be
278 * done as we please. See FNIEMOP_DEF.
279 */
280#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
281
282/**
283 * Call a common opcode decoder function taking one extra argument.
284 *
285 * We're using macros for this so that adding and removing parameters can be
286 * done as we please. See FNIEMOP_DEF_1.
287 */
288#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
289
290/**
291 * Call a common opcode decoder function taking two extra arguments.
292 *
293 * We're using macros for this so that adding and removing parameters can be
294 * done as we please. See FNIEMOP_DEF_2.
295 */
296#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
297
298/**
299 * Check if we're currently executing in real or virtual 8086 mode.
300 *
301 * @returns @c true if it is, @c false if not.
302 * @param a_pVCpu The IEM state of the current CPU.
303 */
304#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
305
306/**
307 * Check if we're currently executing in virtual 8086 mode.
308 *
309 * @returns @c true if it is, @c false if not.
310 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
311 */
312#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
313
314/**
315 * Check if we're currently executing in long mode.
316 *
317 * @returns @c true if it is, @c false if not.
318 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
319 */
320#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
321
322/**
323 * Check if we're currently executing in real mode.
324 *
325 * @returns @c true if it is, @c false if not.
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
336
337/**
338 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
339 * @returns PCCPUMFEATURES
340 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
341 */
342#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
343
344/**
345 * Evaluates to true if we're presenting an Intel CPU to the guest.
346 */
347#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
348
349/**
350 * Evaluates to true if we're presenting an AMD CPU to the guest.
351 */
352#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
353
354/**
355 * Check if the address is canonical.
356 */
357#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
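
/*
 * A minimal illustrative sketch (not compiled) of the mode and canonicality
 * checks above; GCPtrMem is hypothetical.
 */
#if 0
if (   pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
    && !IEM_IS_CANONICAL(GCPtrMem))
    return iemRaiseGeneralProtectionFault0(pVCpu); /* Non-canonical addresses raise #GP(0). */
#endif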
358
359/** @def IEM_USE_UNALIGNED_DATA_ACCESS
360 * Use unaligned accesses instead of elaborate byte assembly. */
361#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
362# define IEM_USE_UNALIGNED_DATA_ACCESS
363#endif
364
365
366/*********************************************************************************************************************************
367* Global Variables *
368*********************************************************************************************************************************/
369extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
370
371
372/** Function table for the ADD instruction. */
373IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
374{
375 iemAImpl_add_u8, iemAImpl_add_u8_locked,
376 iemAImpl_add_u16, iemAImpl_add_u16_locked,
377 iemAImpl_add_u32, iemAImpl_add_u32_locked,
378 iemAImpl_add_u64, iemAImpl_add_u64_locked
379};
380
381/** Function table for the ADC instruction. */
382IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
383{
384 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
385 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
386 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
387 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
388};
389
390/** Function table for the SUB instruction. */
391IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
392{
393 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
394 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
395 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
396 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
397};
398
399/** Function table for the SBB instruction. */
400IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
401{
402 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
403 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
404 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
405 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
406};
407
408/** Function table for the OR instruction. */
409IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
410{
411 iemAImpl_or_u8, iemAImpl_or_u8_locked,
412 iemAImpl_or_u16, iemAImpl_or_u16_locked,
413 iemAImpl_or_u32, iemAImpl_or_u32_locked,
414 iemAImpl_or_u64, iemAImpl_or_u64_locked
415};
416
417/** Function table for the XOR instruction. */
418IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
419{
420 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
421 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
422 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
423 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
424};
425
426/** Function table for the AND instruction. */
427IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
428{
429 iemAImpl_and_u8, iemAImpl_and_u8_locked,
430 iemAImpl_and_u16, iemAImpl_and_u16_locked,
431 iemAImpl_and_u32, iemAImpl_and_u32_locked,
432 iemAImpl_and_u64, iemAImpl_and_u64_locked
433};
434
435/** Function table for the CMP instruction.
436 * @remarks Making operand order ASSUMPTIONS.
437 */
438IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
439{
440 iemAImpl_cmp_u8, NULL,
441 iemAImpl_cmp_u16, NULL,
442 iemAImpl_cmp_u32, NULL,
443 iemAImpl_cmp_u64, NULL
444};
445
446/** Function table for the TEST instruction.
447 * @remarks Making operand order ASSUMPTIONS.
448 */
449IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
450{
451 iemAImpl_test_u8, NULL,
452 iemAImpl_test_u16, NULL,
453 iemAImpl_test_u32, NULL,
454 iemAImpl_test_u64, NULL
455};
456
457/** Function table for the BT instruction. */
458IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
459{
460 NULL, NULL,
461 iemAImpl_bt_u16, NULL,
462 iemAImpl_bt_u32, NULL,
463 iemAImpl_bt_u64, NULL
464};
465
466/** Function table for the BTC instruction. */
467IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
468{
469 NULL, NULL,
470 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
471 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
472 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
473};
474
475/** Function table for the BTR instruction. */
476IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
477{
478 NULL, NULL,
479 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
480 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
481 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
482};
483
484/** Function table for the BTS instruction. */
485IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
486{
487 NULL, NULL,
488 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
489 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
490 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
491};
492
493/** Function table for the BSF instruction. */
494IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
495{
496 NULL, NULL,
497 iemAImpl_bsf_u16, NULL,
498 iemAImpl_bsf_u32, NULL,
499 iemAImpl_bsf_u64, NULL
500};
501
502/** Function table for the BSR instruction. */
503IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
504{
505 NULL, NULL,
506 iemAImpl_bsr_u16, NULL,
507 iemAImpl_bsr_u32, NULL,
508 iemAImpl_bsr_u64, NULL
509};
510
511/** Function table for the IMUL instruction. */
512IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
513{
514 NULL, NULL,
515 iemAImpl_imul_two_u16, NULL,
516 iemAImpl_imul_two_u32, NULL,
517 iemAImpl_imul_two_u64, NULL
518};
519
520/** Group 1 /r lookup table. */
521IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
522{
523 &g_iemAImpl_add,
524 &g_iemAImpl_or,
525 &g_iemAImpl_adc,
526 &g_iemAImpl_sbb,
527 &g_iemAImpl_and,
528 &g_iemAImpl_sub,
529 &g_iemAImpl_xor,
530 &g_iemAImpl_cmp
531};
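
/*
 * A minimal illustrative sketch (not compiled) of how the group 1 table is
 * indexed by the ModR/M reg field; bRm is hypothetical and the IEMOPBINSIZES
 * member names are assumed from IEMInternal.h.
 */
#if 0
PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7]; /* reg field: 0=ADD, 1=OR, ..., 7=CMP */
/* The locked variants (pImpl->pfnLockedU32 etc.) back LOCK-prefixed memory forms and are
   NULL where no locked encoding exists (e.g. CMP); the normal variants handle the rest. */
#endif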
532
533/** Function table for the INC instruction. */
534IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
535{
536 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
537 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
538 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
539 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
540};
541
542/** Function table for the DEC instruction. */
543IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
544{
545 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
546 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
547 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
548 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
549};
550
551/** Function table for the NEG instruction. */
552IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
553{
554 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
555 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
556 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
557 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
558};
559
560/** Function table for the NOT instruction. */
561IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
562{
563 iemAImpl_not_u8, iemAImpl_not_u8_locked,
564 iemAImpl_not_u16, iemAImpl_not_u16_locked,
565 iemAImpl_not_u32, iemAImpl_not_u32_locked,
566 iemAImpl_not_u64, iemAImpl_not_u64_locked
567};
568
569
570/** Function table for the ROL instruction. */
571IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
572{
573 iemAImpl_rol_u8,
574 iemAImpl_rol_u16,
575 iemAImpl_rol_u32,
576 iemAImpl_rol_u64
577};
578
579/** Function table for the ROR instruction. */
580IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
581{
582 iemAImpl_ror_u8,
583 iemAImpl_ror_u16,
584 iemAImpl_ror_u32,
585 iemAImpl_ror_u64
586};
587
588/** Function table for the RCL instruction. */
589IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
590{
591 iemAImpl_rcl_u8,
592 iemAImpl_rcl_u16,
593 iemAImpl_rcl_u32,
594 iemAImpl_rcl_u64
595};
596
597/** Function table for the RCR instruction. */
598IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
599{
600 iemAImpl_rcr_u8,
601 iemAImpl_rcr_u16,
602 iemAImpl_rcr_u32,
603 iemAImpl_rcr_u64
604};
605
606/** Function table for the SHL instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
608{
609 iemAImpl_shl_u8,
610 iemAImpl_shl_u16,
611 iemAImpl_shl_u32,
612 iemAImpl_shl_u64
613};
614
615/** Function table for the SHR instruction. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
617{
618 iemAImpl_shr_u8,
619 iemAImpl_shr_u16,
620 iemAImpl_shr_u32,
621 iemAImpl_shr_u64
622};
623
624/** Function table for the SAR instruction. */
625IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
626{
627 iemAImpl_sar_u8,
628 iemAImpl_sar_u16,
629 iemAImpl_sar_u32,
630 iemAImpl_sar_u64
631};
632
633
634/** Function table for the MUL instruction. */
635IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
636{
637 iemAImpl_mul_u8,
638 iemAImpl_mul_u16,
639 iemAImpl_mul_u32,
640 iemAImpl_mul_u64
641};
642
643/** Function table for the IMUL instruction working implicitly on rAX. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
645{
646 iemAImpl_imul_u8,
647 iemAImpl_imul_u16,
648 iemAImpl_imul_u32,
649 iemAImpl_imul_u64
650};
651
652/** Function table for the DIV instruction. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
654{
655 iemAImpl_div_u8,
656 iemAImpl_div_u16,
657 iemAImpl_div_u32,
658 iemAImpl_div_u64
659};
660
661/** Function table for the IDIV instruction. */
662IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
663{
664 iemAImpl_idiv_u8,
665 iemAImpl_idiv_u16,
666 iemAImpl_idiv_u32,
667 iemAImpl_idiv_u64
668};
669
670/** Function table for the SHLD instruction */
671IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
672{
673 iemAImpl_shld_u16,
674 iemAImpl_shld_u32,
675 iemAImpl_shld_u64,
676};
677
678/** Function table for the SHRD instruction */
679IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
680{
681 iemAImpl_shrd_u16,
682 iemAImpl_shrd_u32,
683 iemAImpl_shrd_u64,
684};
685
686
687/** Function table for the PUNPCKLBW instruction */
688IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
689/** Function table for the PUNPCKLWD instruction */
690IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
691/** Function table for the PUNPCKLDQ instruction */
692IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
693/** Function table for the PUNPCKLQDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
695
696/** Function table for the PUNPCKHBW instruction */
697IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
698/** Function table for the PUNPCKHWD instruction */
699IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
700/** Function table for the PUNPCKHDQ instruction */
701IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
702/** Function table for the PUNPCKHQDQ instruction */
703IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
704
705/** Function table for the PXOR instruction */
706IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
707/** Function table for the PCMPEQB instruction */
708IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
709/** Function table for the PCMPEQW instruction */
710IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
711/** Function table for the PCMPEQD instruction */
712IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
713
714
715#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
716/** What IEM just wrote. */
717uint8_t g_abIemWrote[256];
718/** How much IEM just wrote. */
719size_t g_cbIemWrote;
720#endif
721
722
723/*********************************************************************************************************************************
724* Internal Functions *
725*********************************************************************************************************************************/
726IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
727IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
728IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
729IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
730/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
733IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
734IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
735IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
736IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
737IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
738IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
740IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
742IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
743#ifdef IEM_WITH_SETJMP
744DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
745DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
746DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
748DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
749#endif
750
751IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
752IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
754IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
755IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
756IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
757IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
758IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
759IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
760IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
761IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
762IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
763IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
764IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
765IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
766IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
767
768#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
769IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
770#endif
771IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
772IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
773
774
775
776/**
777 * Sets the pass up status.
778 *
779 * @returns VINF_SUCCESS.
780 * @param pVCpu The cross context virtual CPU structure of the
781 * calling thread.
782 * @param rcPassUp The pass up status. Must be informational.
783 * VINF_SUCCESS is not allowed.
784 */
785IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
786{
787 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
788
789 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
790 if (rcOldPassUp == VINF_SUCCESS)
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 /* If both are EM scheduling codes, use EM priority rules. */
793 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
794 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
795 {
796 if (rcPassUp < rcOldPassUp)
797 {
798 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
799 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
800 }
801 else
802 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
803 }
804 /* Override EM scheduling with specific status code. */
805 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
806 {
807 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
808 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
809 }
810 /* Don't override specific status code, first come first served. */
811 else
812 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Calculates the CPU mode.
819 *
820 * This is mainly for updating IEMCPU::enmCpuMode.
821 *
822 * @returns CPU mode.
823 * @param pCtx The register context for the CPU.
824 */
825DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
826{
827 if (CPUMIsGuestIn64BitCodeEx(pCtx))
828 return IEMMODE_64BIT;
829 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
830 return IEMMODE_32BIT;
831 return IEMMODE_16BIT;
832}
833
834
835/**
836 * Initializes the execution state.
837 *
838 * @param pVCpu The cross context virtual CPU structure of the
839 * calling thread.
840 * @param fBypassHandlers Whether to bypass access handlers.
841 *
842 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
843 * side-effects in strict builds.
844 */
845DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
846{
847 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
848
849 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
850
851#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
854 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
855 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
856 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
858 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
859 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
860#endif
861
862#ifdef VBOX_WITH_RAW_MODE_NOT_R0
863 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
864#endif
865 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
866 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
867#ifdef VBOX_STRICT
868 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
869 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
870 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
871 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
872 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
873 pVCpu->iem.s.uRexReg = 127;
874 pVCpu->iem.s.uRexB = 127;
875 pVCpu->iem.s.uRexIndex = 127;
876 pVCpu->iem.s.iEffSeg = 127;
877 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
878# ifdef IEM_WITH_CODE_TLB
879 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
880 pVCpu->iem.s.pbInstrBuf = NULL;
881 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
882 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
883 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
884 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
885# else
886 pVCpu->iem.s.offOpcode = 127;
887 pVCpu->iem.s.cbOpcode = 127;
888# endif
889#endif
890
891 pVCpu->iem.s.cActiveMappings = 0;
892 pVCpu->iem.s.iNextMapping = 0;
893 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
894 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
895#ifdef VBOX_WITH_RAW_MODE_NOT_R0
896 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
897 && pCtx->cs.u64Base == 0
898 && pCtx->cs.u32Limit == UINT32_MAX
899 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
900 if (!pVCpu->iem.s.fInPatchCode)
901 CPUMRawLeave(pVCpu, VINF_SUCCESS);
902#endif
903
904#ifdef IEM_VERIFICATION_MODE_FULL
905 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
906 pVCpu->iem.s.fNoRem = true;
907#endif
908}
909
910
911/**
912 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
913 *
914 * @param pVCpu The cross context virtual CPU structure of the
915 * calling thread.
916 */
917DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
918{
919 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
920#ifdef IEM_VERIFICATION_MODE_FULL
921 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
922#endif
923#ifdef VBOX_STRICT
924# ifdef IEM_WITH_CODE_TLB
925# else
926 pVCpu->iem.s.cbOpcode = 0;
927# endif
928#else
929 NOREF(pVCpu);
930#endif
931}
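
/*
 * A minimal illustrative sketch (not compiled) of the init/uninit bracket that
 * the iemInitExec remarks ask callers to observe; iemDoOneInstruction is
 * hypothetical.
 */
#if 0
iemInitExec(pVCpu, false /*fBypassHandlers*/);
VBOXSTRICTRC rcStrict = iemDoOneInstruction(pVCpu);
iemUninitExec(pVCpu); /* Undoes the strict-build state poisoning done by iemInitExec. */
#endif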
932
933
934/**
935 * Initializes the decoder state.
936 *
937 * iemReInitDecoder is mostly a copy of this function.
938 *
939 * @param pVCpu The cross context virtual CPU structure of the
940 * calling thread.
941 * @param fBypassHandlers Whether to bypass access handlers.
942 */
943DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
944{
945 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
946
947 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
948
949#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
950 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
951 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
952 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
953 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
954 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
955 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
956 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
957 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
958#endif
959
960#ifdef VBOX_WITH_RAW_MODE_NOT_R0
961 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
962#endif
963 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
964#ifdef IEM_VERIFICATION_MODE_FULL
965 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
966 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
967#endif
968 IEMMODE enmMode = iemCalcCpuMode(pCtx);
969 pVCpu->iem.s.enmCpuMode = enmMode;
970 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
971 pVCpu->iem.s.enmEffAddrMode = enmMode;
972 if (enmMode != IEMMODE_64BIT)
973 {
974 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
975 pVCpu->iem.s.enmEffOpSize = enmMode;
976 }
977 else
978 {
979 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
980 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
981 }
982 pVCpu->iem.s.fPrefixes = 0;
983 pVCpu->iem.s.uRexReg = 0;
984 pVCpu->iem.s.uRexB = 0;
985 pVCpu->iem.s.uRexIndex = 0;
986 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
987#ifdef IEM_WITH_CODE_TLB
988 pVCpu->iem.s.pbInstrBuf = NULL;
989 pVCpu->iem.s.offInstrNextByte = 0;
990 pVCpu->iem.s.offCurInstrStart = 0;
991# ifdef VBOX_STRICT
992 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
993 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
994 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
995# endif
996#else
997 pVCpu->iem.s.offOpcode = 0;
998 pVCpu->iem.s.cbOpcode = 0;
999#endif
1000 pVCpu->iem.s.cActiveMappings = 0;
1001 pVCpu->iem.s.iNextMapping = 0;
1002 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1003 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1004#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1005 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1006 && pCtx->cs.u64Base == 0
1007 && pCtx->cs.u32Limit == UINT32_MAX
1008 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1009 if (!pVCpu->iem.s.fInPatchCode)
1010 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1011#endif
1012
1013#ifdef DBGFTRACE_ENABLED
1014 switch (enmMode)
1015 {
1016 case IEMMODE_64BIT:
1017 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1018 break;
1019 case IEMMODE_32BIT:
1020 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1021 break;
1022 case IEMMODE_16BIT:
1023 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1024 break;
1025 }
1026#endif
1027}
1028
1029
1030/**
1031 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1032 *
1033 * This is mostly a copy of iemInitDecoder.
1034 *
1035 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1036 */
1037DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1038{
1039 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1040
1041 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1042
1043#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1044 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1046 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1049 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1050 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1051 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1052#endif
1053
1054 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1055#ifdef IEM_VERIFICATION_MODE_FULL
1056 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1057 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1058#endif
1059 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1060 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1061 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1062 pVCpu->iem.s.enmEffAddrMode = enmMode;
1063 if (enmMode != IEMMODE_64BIT)
1064 {
1065 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1066 pVCpu->iem.s.enmEffOpSize = enmMode;
1067 }
1068 else
1069 {
1070 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1071 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1072 }
1073 pVCpu->iem.s.fPrefixes = 0;
1074 pVCpu->iem.s.uRexReg = 0;
1075 pVCpu->iem.s.uRexB = 0;
1076 pVCpu->iem.s.uRexIndex = 0;
1077 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1078#ifdef IEM_WITH_CODE_TLB
1079 if (pVCpu->iem.s.pbInstrBuf)
1080 {
1081 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1082 - pVCpu->iem.s.uInstrBufPc;
1083 if (off < pVCpu->iem.s.cbInstrBufTotal)
1084 {
1085 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1086 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1087 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1088 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1089 else
1090 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1091 }
1092 else
1093 {
1094 pVCpu->iem.s.pbInstrBuf = NULL;
1095 pVCpu->iem.s.offInstrNextByte = 0;
1096 pVCpu->iem.s.offCurInstrStart = 0;
1097 pVCpu->iem.s.cbInstrBuf = 0;
1098 pVCpu->iem.s.cbInstrBufTotal = 0;
1099 }
1100 }
1101 else
1102 {
1103 pVCpu->iem.s.offInstrNextByte = 0;
1104 pVCpu->iem.s.offCurInstrStart = 0;
1105 pVCpu->iem.s.cbInstrBuf = 0;
1106 pVCpu->iem.s.cbInstrBufTotal = 0;
1107 }
1108#else
1109 pVCpu->iem.s.cbOpcode = 0;
1110 pVCpu->iem.s.offOpcode = 0;
1111#endif
1112 Assert(pVCpu->iem.s.cActiveMappings == 0);
1113 pVCpu->iem.s.iNextMapping = 0;
1114 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1115 Assert(pVCpu->iem.s.fBypassHandlers == false);
1116#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1117 if (!pVCpu->iem.s.fInPatchCode)
1118 { /* likely */ }
1119 else
1120 {
1121 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1122 && pCtx->cs.u64Base == 0
1123 && pCtx->cs.u32Limit == UINT32_MAX
1124 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1125 if (!pVCpu->iem.s.fInPatchCode)
1126 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1127 }
1128#endif
1129
1130#ifdef DBGFTRACE_ENABLED
1131 switch (enmMode)
1132 {
1133 case IEMMODE_64BIT:
1134 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1135 break;
1136 case IEMMODE_32BIT:
1137 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1138 break;
1139 case IEMMODE_16BIT:
1140 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1141 break;
1142 }
1143#endif
1144}
1145
1146
1147
1148/**
1149 * Prefetches the opcodes the first time we start executing.
1150 *
1151 * @returns Strict VBox status code.
1152 * @param pVCpu The cross context virtual CPU structure of the
1153 * calling thread.
1154 * @param fBypassHandlers Whether to bypass access handlers.
1155 */
1156IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1157{
1158#ifdef IEM_VERIFICATION_MODE_FULL
1159 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1160#endif
1161 iemInitDecoder(pVCpu, fBypassHandlers);
1162
1163#ifdef IEM_WITH_CODE_TLB
1164 /** @todo Do ITLB lookup here. */
1165
1166#else /* !IEM_WITH_CODE_TLB */
1167
1168 /*
1169 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1170 *
1171 * First translate CS:rIP to a physical address.
1172 */
1173 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1174 uint32_t cbToTryRead;
1175 RTGCPTR GCPtrPC;
1176 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1177 {
1178 cbToTryRead = PAGE_SIZE;
1179 GCPtrPC = pCtx->rip;
1180 if (!IEM_IS_CANONICAL(GCPtrPC))
1181 return iemRaiseGeneralProtectionFault0(pVCpu);
1182 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1183 }
1184 else
1185 {
1186 uint32_t GCPtrPC32 = pCtx->eip;
1187 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1188 if (GCPtrPC32 > pCtx->cs.u32Limit)
1189 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1190 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1191 if (!cbToTryRead) /* overflowed */
1192 {
1193 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1194 cbToTryRead = UINT32_MAX;
1195 }
1196 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1197 Assert(GCPtrPC <= UINT32_MAX);
1198 }
1199
1200# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1201 /* Allow interpretation of patch manager code blocks since they can for
1202 instance throw #PFs for perfectly good reasons. */
1203 if (pVCpu->iem.s.fInPatchCode)
1204 {
1205 size_t cbRead = 0;
1206 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1207 AssertRCReturn(rc, rc);
1208 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1209 return VINF_SUCCESS;
1210 }
1211# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1212
1213 RTGCPHYS GCPhys;
1214 uint64_t fFlags;
1215 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1216 if (RT_FAILURE(rc))
1217 {
1218 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1219 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1220 }
1221 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1222 {
1223 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1224 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1225 }
1226 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1227 {
1228 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1229 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1230 }
1231 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1232 /** @todo Check reserved bits and such stuff. PGM is better at doing
1233 * that, so do it when implementing the guest virtual address
1234 * TLB... */
1235
1236# ifdef IEM_VERIFICATION_MODE_FULL
1237 /*
1238 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1239 * instruction.
1240 */
1241 /** @todo optimize this differently by not using PGMPhysRead. */
1242 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1243 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1244 if ( offPrevOpcodes < cbOldOpcodes
1245 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1246 {
1247 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1248 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1249 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1250 pVCpu->iem.s.cbOpcode = cbNew;
1251 return VINF_SUCCESS;
1252 }
1253# endif
1254
1255 /*
1256 * Read the bytes at this address.
1257 */
1258 PVM pVM = pVCpu->CTX_SUFF(pVM);
1259# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1260 size_t cbActual;
1261 if ( PATMIsEnabled(pVM)
1262 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1263 {
1264 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1265 Assert(cbActual > 0);
1266 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1267 }
1268 else
1269# endif
1270 {
1271 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1272 if (cbToTryRead > cbLeftOnPage)
1273 cbToTryRead = cbLeftOnPage;
1274 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1275 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1276
1277 if (!pVCpu->iem.s.fBypassHandlers)
1278 {
1279 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1280 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1281 { /* likely */ }
1282 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1283 {
1284 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1285 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1286 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1287 }
1288 else
1289 {
1290 Log((RT_SUCCESS(rcStrict)
1291 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1292 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1293 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1294 return rcStrict;
1295 }
1296 }
1297 else
1298 {
1299 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1300 if (RT_SUCCESS(rc))
1301 { /* likely */ }
1302 else
1303 {
1304 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1305 GCPtrPC, GCPhys, cbToTryRead, rc));
1306 return rc;
1307 }
1308 }
1309 pVCpu->iem.s.cbOpcode = cbToTryRead;
1310 }
1311#endif /* !IEM_WITH_CODE_TLB */
1312 return VINF_SUCCESS;
1313}
1314
1315
1316/**
1317 * Invalidates the IEM TLBs.
1318 *
1319 * This is called internally as well as by PGM when moving GC mappings.
1320 *
1321 *
1322 * @param pVCpu The cross context virtual CPU structure of the calling
1323 * thread.
1324 * @param fVmm Set when PGM calls us with a remapping.
1325 */
1326VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1327{
1328#ifdef IEM_WITH_CODE_TLB
1329 pVCpu->iem.s.cbInstrBufTotal = 0;
1330 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1331 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1332 { /* very likely */ }
1333 else
1334 {
1335 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1336 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1337 while (i-- > 0)
1338 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1339 }
1340#endif
1341
1342#ifdef IEM_WITH_DATA_TLB
1343 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1344 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1345 { /* very likely */ }
1346 else
1347 {
1348 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1349 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1350 while (i-- > 0)
1351 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1352 }
1353#endif
1354 NOREF(pVCpu); NOREF(fVmm);
1355}
1356
1357
1358/**
1359 * Invalidates a page in the TLBs.
1360 *
1361 * @param pVCpu The cross context virtual CPU structure of the calling
1362 * thread.
1363 * @param GCPtr The address of the page to invalidate
1364 */
1365VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1366{
1367#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1368 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1369 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1370 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1371 uintptr_t idx = (uint8_t)GCPtr;
1372
1373# ifdef IEM_WITH_CODE_TLB
1374 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1375 {
1376 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1377 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1378 pVCpu->iem.s.cbInstrBufTotal = 0;
1379 }
1380# endif
1381
1382# ifdef IEM_WITH_DATA_TLB
1383 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1384 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1385# endif
1386#else
1387 NOREF(pVCpu); NOREF(GCPtr);
1388#endif
1389}
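
/*
 * A minimal illustrative sketch (not compiled) of the tag/revision scheme the
 * lookups use: a hit requires both the page number and the current revision to
 * match, so bumping uTlbRevision in IEMTlbInvalidateAll invalidates every entry
 * without touching the array; GCPtrMem is hypothetical.
 */
#if 0
uint64_t const uTag  = (GCPtrMem >> X86_PAGE_SHIFT) | pVCpu->iem.s.DataTlb.uTlbRevision;
PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.DataTlb.aEntries[(uint8_t)uTag];
bool const     fHit  = pTlbe->uTag == uTag; /* Misses whenever the revision was bumped since the entry was filled. */
#endif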
1390
1391
1392/**
1393 * Invalidates the host physical aspects of the IEM TLBs.
1394 *
1395 * This is called internally as well as by PGM when moving GC mappings.
1396 *
1397 * @param pVCpu The cross context virtual CPU structure of the calling
1398 * thread.
1399 */
1400VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1401{
1402#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1403 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1404
1405# ifdef IEM_WITH_CODE_TLB
1406 pVCpu->iem.s.cbInstrBufTotal = 0;
1407# endif
1408 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1409 if (uTlbPhysRev != 0)
1410 {
1411 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1412 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1413 }
1414 else
1415 {
1416 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1417 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1418
1419 unsigned i;
1420# ifdef IEM_WITH_CODE_TLB
1421 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1422 while (i-- > 0)
1423 {
1424 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1425 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1426 }
1427# endif
1428# ifdef IEM_WITH_DATA_TLB
1429 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1430 while (i-- > 0)
1431 {
1432 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1433 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1434 }
1435# endif
1436 }
1437#else
1438 NOREF(pVCpu);
1439#endif
1440}
1441
1442
1443/**
1444 * Invalidates the host physical aspects of the IEM TLBs.
1445 *
1446 * This is called internally as well as by PGM when moving GC mappings.
1447 *
1448 * @param pVM The cross context VM structure.
1449 *
1450 * @remarks Caller holds the PGM lock.
1451 */
1452VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1453{
1454 RT_NOREF_PV(pVM);
1455}
1456
1457#ifdef IEM_WITH_CODE_TLB
1458
1459/**
1460 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1461 * failure and jumps.
1462 *
1463 * We end up here for a number of reasons:
1464 * - pbInstrBuf isn't yet initialized.
1465 * - Advancing beyond the buffer boundary (e.g. cross page).
1466 * - Advancing beyond the CS segment limit.
1467 * - Fetching from non-mappable page (e.g. MMIO).
1468 *
1469 * @param pVCpu The cross context virtual CPU structure of the
1470 * calling thread.
1471 * @param pvDst Where to return the bytes.
1472 * @param cbDst Number of bytes to read.
1473 *
1474 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1475 */
1476IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1477{
1478#ifdef IN_RING3
1479//__debugbreak();
1480#else
1481 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1482#endif
1483 for (;;)
1484 {
1485 Assert(cbDst <= 8);
1486 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1487
1488 /*
1489 * We might have a partial buffer match, deal with that first to make the
1490 * rest simpler. This is the first part of the cross page/buffer case.
1491 */
1492 if (pVCpu->iem.s.pbInstrBuf != NULL)
1493 {
1494 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1495 {
1496 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1497 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1498 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1499
1500 cbDst -= cbCopy;
1501 pvDst = (uint8_t *)pvDst + cbCopy;
1502 offBuf += cbCopy;
1503 pVCpu->iem.s.offInstrNextByte = offBuf;
1504 }
1505 }
1506
1507 /*
1508 * Check segment limit, figuring how much we're allowed to access at this point.
1509 *
1510 * We will fault immediately if RIP is past the segment limit / in non-canonical
1511 * territory. If we do continue, there are one or more bytes to read before we
1512 * run into trouble, and we need to read those bytes first before faulting.
1513 */
1514 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1515 RTGCPTR GCPtrFirst;
1516 uint32_t cbMaxRead;
1517 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1518 {
1519 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1520 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1521 { /* likely */ }
1522 else
1523 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1524 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1525 }
1526 else
1527 {
1528 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1529 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1530 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1531 { /* likely */ }
1532 else
1533 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1534 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1535 if (cbMaxRead != 0)
1536 { /* likely */ }
1537 else
1538 {
1539 /* Overflowed because address is 0 and limit is max. */
1540 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1541 cbMaxRead = X86_PAGE_SIZE;
1542 }
1543 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1544 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1545 if (cbMaxRead2 < cbMaxRead)
1546 cbMaxRead = cbMaxRead2;
1547 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1548 }
1549
1550 /*
1551 * Get the TLB entry for this piece of code.
1552 */
1553 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1555 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1556 if (pTlbe->uTag == uTag)
1557 {
1558 /* likely when executing lots of code, otherwise unlikely */
1559# ifdef VBOX_WITH_STATISTICS
1560 pVCpu->iem.s.CodeTlb.cTlbHits++;
1561# endif
1562 }
1563 else
1564 {
1565 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1566# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1567 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1568 {
1569 pTlbe->uTag = uTag;
1570 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1571 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1572 pTlbe->GCPhys = NIL_RTGCPHYS;
1573 pTlbe->pbMappingR3 = NULL;
1574 }
1575 else
1576# endif
1577 {
1578 RTGCPHYS GCPhys;
1579 uint64_t fFlags;
1580 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1581 if (RT_FAILURE(rc))
1582 {
1583 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1584 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1585 }
1586
1587 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1588 pTlbe->uTag = uTag;
1589 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1590 pTlbe->GCPhys = GCPhys;
1591 pTlbe->pbMappingR3 = NULL;
1592 }
1593 }
1594
1595 /*
1596 * Check TLB page table level access flags.
1597 */
1598 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1599 {
1600 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1601 {
1602 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1603 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1604 }
1605 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1606 {
1607 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1608 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1609 }
1610 }
1611
1612# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1613 /*
1614 * Allow interpretation of patch manager code blocks since they can for
1615 * instance throw #PFs for perfectly good reasons.
1616 */
1617 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1618 { /* likely */ }
1619 else
1620 {
1621 /** @todo This could be optimized a little in ring-3 if we liked. */
1622 size_t cbRead = 0;
1623 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1624 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1625 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1626 return;
1627 }
1628# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1629
1630 /*
1631 * Look up the physical page info if necessary.
1632 */
1633 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1634 { /* not necessary */ }
1635 else
1636 {
1637 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1638 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1639 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1640 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1641 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1642 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1643 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1644 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1645 }
1646
1647# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1648 /*
1649 * Try do a direct read using the pbMappingR3 pointer.
1650 */
1651 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1652 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1653 {
1654 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1655 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1656 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1657 {
1658 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1659 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1660 }
1661 else
1662 {
1663 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1664 Assert(cbInstr < cbMaxRead);
1665 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1666 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1667 }
1668 if (cbDst <= cbMaxRead)
1669 {
1670 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1671 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1672 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1673 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1674 return;
1675 }
1676 pVCpu->iem.s.pbInstrBuf = NULL;
1677
1678 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1679 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1680 }
1681 else
1682# endif
1683#if 0
1684 /*
1685 * If there is no special read handling, we can read a bit more and
1686 * put it in the prefetch buffer.
1687 */
1688 if ( cbDst < cbMaxRead
1689 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1690 {
1691 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1692 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1693 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1694 { /* likely */ }
1695 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1696 {
1697 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1698 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1699 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1700 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1701 }
1702 else
1703 {
1704 Log((RT_SUCCESS(rcStrict)
1705 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1706 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1707 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1708 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1709 }
1710 }
1711 /*
1712 * Special read handling, so only read exactly what's needed.
1713 * This is a highly unlikely scenario.
1714 */
1715 else
1716#endif
1717 {
1718 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1719 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1720 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1721 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1722 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1723 { /* likely */ }
1724 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1725 {
1726 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1727 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1728 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1729 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1730 }
1731 else
1732 {
1733 Log((RT_SUCCESS(rcStrict)
1734 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1735 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1736 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1737 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1738 }
1739 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1740 if (cbToRead == cbDst)
1741 return;
1742 }
1743
1744 /*
1745 * More to read, loop.
1746 */
1747 cbDst -= cbMaxRead;
1748 pvDst = (uint8_t *)pvDst + cbMaxRead;
1749 }
1750}
1751
1752#else
1753
1754/**
1755 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1756 * exception if it fails.
1757 *
1758 * @returns Strict VBox status code.
1759 * @param pVCpu The cross context virtual CPU structure of the
1760 * calling thread.
1761 * @param cbMin The minimum number of bytes relative to offOpcode
1762 * that must be read.
1763 */
1764IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1765{
1766 /*
1767 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1768 *
1769 * First translate CS:rIP to a physical address.
1770 */
1771 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1772 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1773 uint32_t cbToTryRead;
1774 RTGCPTR GCPtrNext;
1775 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1776 {
1777 cbToTryRead = PAGE_SIZE;
1778 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1779 if (!IEM_IS_CANONICAL(GCPtrNext))
1780 return iemRaiseGeneralProtectionFault0(pVCpu);
1781 }
1782 else
1783 {
1784 uint32_t GCPtrNext32 = pCtx->eip;
1785 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1786 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1787 if (GCPtrNext32 > pCtx->cs.u32Limit)
1788 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1789 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1790 if (!cbToTryRead) /* overflowed */
1791 {
1792 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1793 cbToTryRead = UINT32_MAX;
1794 /** @todo check out wrapping around the code segment. */
1795 }
1796 if (cbToTryRead < cbMin - cbLeft)
1797 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1798 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1799 }
1800
1801 /* Only read up to the end of the page, and make sure we don't read more
1802 than the opcode buffer can hold. */
1803 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1804 if (cbToTryRead > cbLeftOnPage)
1805 cbToTryRead = cbLeftOnPage;
1806 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1807 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1808/** @todo r=bird: Convert assertion into undefined opcode exception? */
1809 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1810
1811# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1812 /* Allow interpretation of patch manager code blocks since they can for
1813 instance throw #PFs for perfectly good reasons. */
1814 if (pVCpu->iem.s.fInPatchCode)
1815 {
1816 size_t cbRead = 0;
1817 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1818 AssertRCReturn(rc, rc);
1819 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1820 return VINF_SUCCESS;
1821 }
1822# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1823
1824 RTGCPHYS GCPhys;
1825 uint64_t fFlags;
1826 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1827 if (RT_FAILURE(rc))
1828 {
1829 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1830 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1831 }
1832 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1833 {
1834 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1835 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1836 }
1837 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1838 {
1839 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1840 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1841 }
1842 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1843 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1844 /** @todo Check reserved bits and such stuff. PGM is better at doing
1845 * that, so do it when implementing the guest virtual address
1846 * TLB... */
1847
1848 /*
1849 * Read the bytes at this address.
1850 *
1851 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1852 * and since PATM should only patch the start of an instruction there
1853 * should be no need to check again here.
1854 */
1855 if (!pVCpu->iem.s.fBypassHandlers)
1856 {
1857 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1858 cbToTryRead, PGMACCESSORIGIN_IEM);
1859 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1860 { /* likely */ }
1861 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1862 {
1863 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1864 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1865 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1866 }
1867 else
1868 {
1869 Log((RT_SUCCESS(rcStrict)
1870 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1871 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1872 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1873 return rcStrict;
1874 }
1875 }
1876 else
1877 {
1878 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1879 if (RT_SUCCESS(rc))
1880 { /* likely */ }
1881 else
1882 {
1883 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1884 return rc;
1885 }
1886 }
1887 pVCpu->iem.s.cbOpcode += cbToTryRead;
1888 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1889
1890 return VINF_SUCCESS;
1891}
1892
1893#endif /* !IEM_WITH_CODE_TLB */
1894#ifndef IEM_WITH_SETJMP
1895
1896/**
1897 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1898 *
1899 * @returns Strict VBox status code.
1900 * @param pVCpu The cross context virtual CPU structure of the
1901 * calling thread.
1902 * @param pb Where to return the opcode byte.
1903 */
1904DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1905{
1906 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1907 if (rcStrict == VINF_SUCCESS)
1908 {
1909 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1910 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1911 pVCpu->iem.s.offOpcode = offOpcode + 1;
1912 }
1913 else
1914 *pb = 0;
1915 return rcStrict;
1916}
1917
1918
1919/**
1920 * Fetches the next opcode byte.
1921 *
1922 * @returns Strict VBox status code.
1923 * @param pVCpu The cross context virtual CPU structure of the
1924 * calling thread.
1925 * @param pu8 Where to return the opcode byte.
1926 */
1927DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1928{
1929 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1930 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1931 {
1932 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1933 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1934 return VINF_SUCCESS;
1935 }
1936 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1937}
1938
1939#else /* IEM_WITH_SETJMP */
1940
1941/**
1942 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1943 *
1944 * @returns The opcode byte.
1945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1946 */
1947DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1948{
1949# ifdef IEM_WITH_CODE_TLB
1950 uint8_t u8;
1951 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1952 return u8;
1953# else
1954 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1955 if (rcStrict == VINF_SUCCESS)
1956 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1957 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1958# endif
1959}
1960
1961
1962/**
1963 * Fetches the next opcode byte, longjmp on error.
1964 *
1965 * @returns The opcode byte.
1966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1967 */
1968DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1969{
1970# ifdef IEM_WITH_CODE_TLB
1971 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1972 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1973 if (RT_LIKELY( pbBuf != NULL
1974 && offBuf < pVCpu->iem.s.cbInstrBuf))
1975 {
1976 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1977 return pbBuf[offBuf];
1978 }
1979# else
1980 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1981 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1982 {
1983 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1984 return pVCpu->iem.s.abOpcode[offOpcode];
1985 }
1986# endif
1987 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1988}
1989
1990#endif /* IEM_WITH_SETJMP */
1991
1992/**
1993 * Fetches the next opcode byte, returns automatically on failure.
1994 *
1995 * @param a_pu8 Where to return the opcode byte.
1996 * @remark Implicitly references pVCpu.
1997 */
1998#ifndef IEM_WITH_SETJMP
1999# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2000 do \
2001 { \
2002 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2003 if (rcStrict2 == VINF_SUCCESS) \
2004 { /* likely */ } \
2005 else \
2006 return rcStrict2; \
2007 } while (0)
2008#else
2009# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2010#endif /* IEM_WITH_SETJMP */
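/*
 * Illustrative sketch (not part of the decoder proper): a hypothetical helper,
 * iemOpExampleFetchImm8(), showing the intended use of IEM_OPCODE_GET_NEXT_U8.
 * In the non-setjmp build the macro hides a 'return rcStrict2;', so it may only
 * be used in functions returning VBOXSTRICTRC; in the setjmp build it longjmps
 * via iemOpcodeGetNextU8Jmp instead.  The helper name is made up for this example.
 *
 * @code
 *     IEM_STATIC VBOXSTRICTRC iemOpExampleFetchImm8(PVMCPU pVCpu, uint8_t *pbImm)
 *     {
 *         uint8_t bImm;
 *         IEM_OPCODE_GET_NEXT_U8(&bImm);  // returns / longjmps on fetch failure
 *         *pbImm = bImm;
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */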
2011
2012
2013#ifndef IEM_WITH_SETJMP
2014/**
2015 * Fetches the next signed byte from the opcode stream.
2016 *
2017 * @returns Strict VBox status code.
2018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2019 * @param pi8 Where to return the signed byte.
2020 */
2021DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2022{
2023 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2024}
2025#endif /* !IEM_WITH_SETJMP */
2026
2027
2028/**
2029 * Fetches the next signed byte from the opcode stream, returning automatically
2030 * on failure.
2031 *
2032 * @param a_pi8 Where to return the signed byte.
2033 * @remark Implicitly references pVCpu.
2034 */
2035#ifndef IEM_WITH_SETJMP
2036# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2037 do \
2038 { \
2039 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2040 if (rcStrict2 != VINF_SUCCESS) \
2041 return rcStrict2; \
2042 } while (0)
2043#else /* IEM_WITH_SETJMP */
2044# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2045
2046#endif /* IEM_WITH_SETJMP */
2047
2048#ifndef IEM_WITH_SETJMP
2049
2050/**
2051 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2052 *
2053 * @returns Strict VBox status code.
2054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2055 * @param pu16 Where to return the opcode word.
2056 */
2057DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2058{
2059 uint8_t u8;
2060 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2061 if (rcStrict == VINF_SUCCESS)
2062 *pu16 = (int8_t)u8;
2063 return rcStrict;
2064}
2065
2066
2067/**
2068 * Fetches the next signed byte from the opcode stream, extending it to
2069 * unsigned 16-bit.
2070 *
2071 * @returns Strict VBox status code.
2072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2073 * @param pu16 Where to return the unsigned word.
2074 */
2075DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2076{
2077 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2078 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2079 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2080
2081 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2082 pVCpu->iem.s.offOpcode = offOpcode + 1;
2083 return VINF_SUCCESS;
2084}
2085
2086#endif /* !IEM_WITH_SETJMP */
2087
2088/**
2089 * Fetches the next signed byte from the opcode stream and sign-extends it to
2090 * a word, returning automatically on failure.
2091 *
2092 * @param a_pu16 Where to return the word.
2093 * @remark Implicitly references pVCpu.
2094 */
2095#ifndef IEM_WITH_SETJMP
2096# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2097 do \
2098 { \
2099 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2100 if (rcStrict2 != VINF_SUCCESS) \
2101 return rcStrict2; \
2102 } while (0)
2103#else
2104# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2105#endif
2106
2107#ifndef IEM_WITH_SETJMP
2108
2109/**
2110 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2111 *
2112 * @returns Strict VBox status code.
2113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2114 * @param pu32 Where to return the opcode dword.
2115 */
2116DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2117{
2118 uint8_t u8;
2119 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2120 if (rcStrict == VINF_SUCCESS)
2121 *pu32 = (int8_t)u8;
2122 return rcStrict;
2123}
2124
2125
2126/**
2127 * Fetches the next signed byte from the opcode stream, extending it to
2128 * unsigned 32-bit.
2129 *
2130 * @returns Strict VBox status code.
2131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2132 * @param pu32 Where to return the unsigned dword.
2133 */
2134DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2135{
2136 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2137 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2138 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2139
2140 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2141 pVCpu->iem.s.offOpcode = offOpcode + 1;
2142 return VINF_SUCCESS;
2143}
2144
2145#endif /* !IEM_WITH_SETJMP */
2146
2147/**
2148 * Fetches the next signed byte from the opcode stream and sign-extends it to
2149 * a double word, returning automatically on failure.
2150 *
2151 * @param a_pu32 Where to return the double word.
2152 * @remark Implicitly references pVCpu.
2153 */
2154#ifndef IEM_WITH_SETJMP
2155# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2156 do \
2157 { \
2158 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2159 if (rcStrict2 != VINF_SUCCESS) \
2160 return rcStrict2; \
2161 } while (0)
2162#else
2163# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2164#endif
2165
2166#ifndef IEM_WITH_SETJMP
2167
2168/**
2169 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2170 *
2171 * @returns Strict VBox status code.
2172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2173 * @param pu64 Where to return the opcode qword.
2174 */
2175DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2176{
2177 uint8_t u8;
2178 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2179 if (rcStrict == VINF_SUCCESS)
2180 *pu64 = (int8_t)u8;
2181 return rcStrict;
2182}
2183
2184
2185/**
2186 * Fetches the next signed byte from the opcode stream, extending it to
2187 * unsigned 64-bit.
2188 *
2189 * @returns Strict VBox status code.
2190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2191 * @param pu64 Where to return the unsigned qword.
2192 */
2193DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2194{
2195 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2196 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2197 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2198
2199 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2200 pVCpu->iem.s.offOpcode = offOpcode + 1;
2201 return VINF_SUCCESS;
2202}
2203
2204#endif /* !IEM_WITH_SETJMP */
2205
2206
2207/**
2208 * Fetches the next signed byte from the opcode stream and sign-extends it to
2209 * a quad word, returning automatically on failure.
2210 *
2211 * @param a_pu64 Where to return the quad word.
2212 * @remark Implicitly references pVCpu.
2213 */
2214#ifndef IEM_WITH_SETJMP
2215# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2216 do \
2217 { \
2218 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2219 if (rcStrict2 != VINF_SUCCESS) \
2220 return rcStrict2; \
2221 } while (0)
2222#else
2223# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2224#endif
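/*
 * Illustrative note: the S8_SX_* fetchers above match the way the CPU widens
 * 8-bit displacements and immediates to the current operand or address size.
 * Hedged, made-up snippet (assumes it sits in a VBOXSTRICTRC function in the
 * non-setjmp build and that u64EffAddr already holds the base+index part):
 *
 * @code
 *     uint64_t u64Disp;
 *     IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Disp); // disp8 0xff yields UINT64_C(0xffffffffffffffff)
 *     u64EffAddr += u64Disp;                   // i.e. effective address - 1
 * @endcode
 */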
2225
2226
2227#ifndef IEM_WITH_SETJMP
2228
2229/**
2230 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2231 *
2232 * @returns Strict VBox status code.
2233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2234 * @param pu16 Where to return the opcode word.
2235 */
2236DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2237{
2238 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2239 if (rcStrict == VINF_SUCCESS)
2240 {
2241 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2242# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2243 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2244# else
2245 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2246# endif
2247 pVCpu->iem.s.offOpcode = offOpcode + 2;
2248 }
2249 else
2250 *pu16 = 0;
2251 return rcStrict;
2252}
2253
2254
2255/**
2256 * Fetches the next opcode word.
2257 *
2258 * @returns Strict VBox status code.
2259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2260 * @param pu16 Where to return the opcode word.
2261 */
2262DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2263{
2264 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2265 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2266 {
2267 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2268# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2269 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2270# else
2271 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2272# endif
2273 return VINF_SUCCESS;
2274 }
2275 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2276}
2277
2278#else /* IEM_WITH_SETJMP */
2279
2280/**
2281 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2282 *
2283 * @returns The opcode word.
2284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2285 */
2286DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2287{
2288# ifdef IEM_WITH_CODE_TLB
2289 uint16_t u16;
2290 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2291 return u16;
2292# else
2293 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2294 if (rcStrict == VINF_SUCCESS)
2295 {
2296 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2297 pVCpu->iem.s.offOpcode += 2;
2298# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2299 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2300# else
2301 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2302# endif
2303 }
2304 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2305# endif
2306}
2307
2308
2309/**
2310 * Fetches the next opcode word, longjmp on error.
2311 *
2312 * @returns The opcode word.
2313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2314 */
2315DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2316{
2317# ifdef IEM_WITH_CODE_TLB
2318 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2319 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2320 if (RT_LIKELY( pbBuf != NULL
2321 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2322 {
2323 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2324# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2325 return *(uint16_t const *)&pbBuf[offBuf];
2326# else
2327 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2328# endif
2329 }
2330# else
2331 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2332 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2333 {
2334 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2335# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2336 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2337# else
2338 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2339# endif
2340 }
2341# endif
2342 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2343}
2344
2345#endif /* IEM_WITH_SETJMP */
2346
2347
2348/**
2349 * Fetches the next opcode word, returns automatically on failure.
2350 *
2351 * @param a_pu16 Where to return the opcode word.
2352 * @remark Implicitly references pVCpu.
2353 */
2354#ifndef IEM_WITH_SETJMP
2355# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2356 do \
2357 { \
2358 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2359 if (rcStrict2 != VINF_SUCCESS) \
2360 return rcStrict2; \
2361 } while (0)
2362#else
2363# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2364#endif
2365
2366#ifndef IEM_WITH_SETJMP
2367
2368/**
2369 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2370 *
2371 * @returns Strict VBox status code.
2372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2373 * @param pu32 Where to return the opcode double word.
2374 */
2375DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2376{
2377 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2378 if (rcStrict == VINF_SUCCESS)
2379 {
2380 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2381 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2382 pVCpu->iem.s.offOpcode = offOpcode + 2;
2383 }
2384 else
2385 *pu32 = 0;
2386 return rcStrict;
2387}
2388
2389
2390/**
2391 * Fetches the next opcode word, zero extending it to a double word.
2392 *
2393 * @returns Strict VBox status code.
2394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2395 * @param pu32 Where to return the opcode double word.
2396 */
2397DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2398{
2399 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2400 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2401 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2402
2403 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2404 pVCpu->iem.s.offOpcode = offOpcode + 2;
2405 return VINF_SUCCESS;
2406}
2407
2408#endif /* !IEM_WITH_SETJMP */
2409
2410
2411/**
2412 * Fetches the next opcode word and zero extends it to a double word, returns
2413 * automatically on failure.
2414 *
2415 * @param a_pu32 Where to return the opcode double word.
2416 * @remark Implicitly references pVCpu.
2417 */
2418#ifndef IEM_WITH_SETJMP
2419# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2420 do \
2421 { \
2422 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2423 if (rcStrict2 != VINF_SUCCESS) \
2424 return rcStrict2; \
2425 } while (0)
2426#else
2427# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2428#endif
2429
2430#ifndef IEM_WITH_SETJMP
2431
2432/**
2433 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2434 *
2435 * @returns Strict VBox status code.
2436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2437 * @param pu64 Where to return the opcode quad word.
2438 */
2439DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2440{
2441 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2442 if (rcStrict == VINF_SUCCESS)
2443 {
2444 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2445 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2446 pVCpu->iem.s.offOpcode = offOpcode + 2;
2447 }
2448 else
2449 *pu64 = 0;
2450 return rcStrict;
2451}
2452
2453
2454/**
2455 * Fetches the next opcode word, zero extending it to a quad word.
2456 *
2457 * @returns Strict VBox status code.
2458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2459 * @param pu64 Where to return the opcode quad word.
2460 */
2461DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2462{
2463 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2464 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2465 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2466
2467 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2468 pVCpu->iem.s.offOpcode = offOpcode + 2;
2469 return VINF_SUCCESS;
2470}
2471
2472#endif /* !IEM_WITH_SETJMP */
2473
2474/**
2475 * Fetches the next opcode word and zero extends it to a quad word, returns
2476 * automatically on failure.
2477 *
2478 * @param a_pu64 Where to return the opcode quad word.
2479 * @remark Implicitly references pVCpu.
2480 */
2481#ifndef IEM_WITH_SETJMP
2482# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2483 do \
2484 { \
2485 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2486 if (rcStrict2 != VINF_SUCCESS) \
2487 return rcStrict2; \
2488 } while (0)
2489#else
2490# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2491#endif
2492
2493
2494#ifndef IEM_WITH_SETJMP
2495/**
2496 * Fetches the next signed word from the opcode stream.
2497 *
2498 * @returns Strict VBox status code.
2499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2500 * @param pi16 Where to return the signed word.
2501 */
2502DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2503{
2504 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2505}
2506#endif /* !IEM_WITH_SETJMP */
2507
2508
2509/**
2510 * Fetches the next signed word from the opcode stream, returning automatically
2511 * on failure.
2512 *
2513 * @param a_pi16 Where to return the signed word.
2514 * @remark Implicitly references pVCpu.
2515 */
2516#ifndef IEM_WITH_SETJMP
2517# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2518 do \
2519 { \
2520 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2521 if (rcStrict2 != VINF_SUCCESS) \
2522 return rcStrict2; \
2523 } while (0)
2524#else
2525# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2526#endif
2527
2528#ifndef IEM_WITH_SETJMP
2529
2530/**
2531 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2532 *
2533 * @returns Strict VBox status code.
2534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2535 * @param pu32 Where to return the opcode dword.
2536 */
2537DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2538{
2539 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2540 if (rcStrict == VINF_SUCCESS)
2541 {
2542 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2543# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2544 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2545# else
2546 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2547 pVCpu->iem.s.abOpcode[offOpcode + 1],
2548 pVCpu->iem.s.abOpcode[offOpcode + 2],
2549 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2550# endif
2551 pVCpu->iem.s.offOpcode = offOpcode + 4;
2552 }
2553 else
2554 *pu32 = 0;
2555 return rcStrict;
2556}
2557
2558
2559/**
2560 * Fetches the next opcode dword.
2561 *
2562 * @returns Strict VBox status code.
2563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2564 * @param pu32 Where to return the opcode double word.
2565 */
2566DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2567{
2568 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2569 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2570 {
2571 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2572# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2573 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2574# else
2575 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2576 pVCpu->iem.s.abOpcode[offOpcode + 1],
2577 pVCpu->iem.s.abOpcode[offOpcode + 2],
2578 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2579# endif
2580 return VINF_SUCCESS;
2581 }
2582 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2583}
2584
2585#else /* IEM_WITH_SETJMP */
2586
2587/**
2588 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2589 *
2590 * @returns The opcode dword.
2591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2592 */
2593DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2594{
2595# ifdef IEM_WITH_CODE_TLB
2596 uint32_t u32;
2597 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2598 return u32;
2599# else
2600 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2601 if (rcStrict == VINF_SUCCESS)
2602 {
2603 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2604 pVCpu->iem.s.offOpcode = offOpcode + 4;
2605# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2606 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2607# else
2608 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2609 pVCpu->iem.s.abOpcode[offOpcode + 1],
2610 pVCpu->iem.s.abOpcode[offOpcode + 2],
2611 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2612# endif
2613 }
2614 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2615# endif
2616}
2617
2618
2619/**
2620 * Fetches the next opcode dword, longjmp on error.
2621 *
2622 * @returns The opcode dword.
2623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2624 */
2625DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2626{
2627# ifdef IEM_WITH_CODE_TLB
2628 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2629 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2630 if (RT_LIKELY( pbBuf != NULL
2631 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2632 {
2633 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2634# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2635 return *(uint32_t const *)&pbBuf[offBuf];
2636# else
2637 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2638 pbBuf[offBuf + 1],
2639 pbBuf[offBuf + 2],
2640 pbBuf[offBuf + 3]);
2641# endif
2642 }
2643# else
2644 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2645 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2646 {
2647 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2648# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2649 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2650# else
2651 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2652 pVCpu->iem.s.abOpcode[offOpcode + 1],
2653 pVCpu->iem.s.abOpcode[offOpcode + 2],
2654 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2655# endif
2656 }
2657# endif
2658 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2659}
2660
2661#endif /* IEM_WITH_SETJMP */
2662
2663
2664/**
2665 * Fetches the next opcode dword, returns automatically on failure.
2666 *
2667 * @param a_pu32 Where to return the opcode dword.
2668 * @remark Implicitly references pVCpu.
2669 */
2670#ifndef IEM_WITH_SETJMP
2671# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2672 do \
2673 { \
2674 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2675 if (rcStrict2 != VINF_SUCCESS) \
2676 return rcStrict2; \
2677 } while (0)
2678#else
2679# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2680#endif
2681
2682#ifndef IEM_WITH_SETJMP
2683
2684/**
2685 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2689 * @param pu64 Where to return the opcode dword.
2690 */
2691DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2692{
2693 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2694 if (rcStrict == VINF_SUCCESS)
2695 {
2696 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2697 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2698 pVCpu->iem.s.abOpcode[offOpcode + 1],
2699 pVCpu->iem.s.abOpcode[offOpcode + 2],
2700 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2701 pVCpu->iem.s.offOpcode = offOpcode + 4;
2702 }
2703 else
2704 *pu64 = 0;
2705 return rcStrict;
2706}
2707
2708
2709/**
2710 * Fetches the next opcode dword, zero extending it to a quad word.
2711 *
2712 * @returns Strict VBox status code.
2713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2714 * @param pu64 Where to return the opcode quad word.
2715 */
2716DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2717{
2718 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2719 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2720 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2721
2722 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2723 pVCpu->iem.s.abOpcode[offOpcode + 1],
2724 pVCpu->iem.s.abOpcode[offOpcode + 2],
2725 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2726 pVCpu->iem.s.offOpcode = offOpcode + 4;
2727 return VINF_SUCCESS;
2728}
2729
2730#endif /* !IEM_WITH_SETJMP */
2731
2732
2733/**
2734 * Fetches the next opcode dword and zero extends it to a quad word, returns
2735 * automatically on failure.
2736 *
2737 * @param a_pu64 Where to return the opcode quad word.
2738 * @remark Implicitly references pVCpu.
2739 */
2740#ifndef IEM_WITH_SETJMP
2741# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2742 do \
2743 { \
2744 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2745 if (rcStrict2 != VINF_SUCCESS) \
2746 return rcStrict2; \
2747 } while (0)
2748#else
2749# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2750#endif
2751
2752
2753#ifndef IEM_WITH_SETJMP
2754/**
2755 * Fetches the next signed double word from the opcode stream.
2756 *
2757 * @returns Strict VBox status code.
2758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2759 * @param pi32 Where to return the signed double word.
2760 */
2761DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2762{
2763 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2764}
2765#endif
2766
2767/**
2768 * Fetches the next signed double word from the opcode stream, returning
2769 * automatically on failure.
2770 *
2771 * @param a_pi32 Where to return the signed double word.
2772 * @remark Implicitly references pVCpu.
2773 */
2774#ifndef IEM_WITH_SETJMP
2775# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2776 do \
2777 { \
2778 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2779 if (rcStrict2 != VINF_SUCCESS) \
2780 return rcStrict2; \
2781 } while (0)
2782#else
2783# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2784#endif
2785
2786#ifndef IEM_WITH_SETJMP
2787
2788/**
2789 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2790 *
2791 * @returns Strict VBox status code.
2792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2793 * @param pu64 Where to return the opcode qword.
2794 */
2795DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2796{
2797 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2798 if (rcStrict == VINF_SUCCESS)
2799 {
2800 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2801 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2802 pVCpu->iem.s.abOpcode[offOpcode + 1],
2803 pVCpu->iem.s.abOpcode[offOpcode + 2],
2804 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2805 pVCpu->iem.s.offOpcode = offOpcode + 4;
2806 }
2807 else
2808 *pu64 = 0;
2809 return rcStrict;
2810}
2811
2812
2813/**
2814 * Fetches the next opcode dword, sign extending it into a quad word.
2815 *
2816 * @returns Strict VBox status code.
2817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2818 * @param pu64 Where to return the opcode quad word.
2819 */
2820DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2821{
2822 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2823 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2824 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2825
2826 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2827 pVCpu->iem.s.abOpcode[offOpcode + 1],
2828 pVCpu->iem.s.abOpcode[offOpcode + 2],
2829 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2830 *pu64 = i32;
2831 pVCpu->iem.s.offOpcode = offOpcode + 4;
2832 return VINF_SUCCESS;
2833}
2834
2835#endif /* !IEM_WITH_SETJMP */
2836
2837
2838/**
2839 * Fetches the next opcode double word and sign extends it to a quad word,
2840 * returns automatically on failure.
2841 *
2842 * @param a_pu64 Where to return the opcode quad word.
2843 * @remark Implicitly references pVCpu.
2844 */
2845#ifndef IEM_WITH_SETJMP
2846# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2847 do \
2848 { \
2849 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2850 if (rcStrict2 != VINF_SUCCESS) \
2851 return rcStrict2; \
2852 } while (0)
2853#else
2854# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2855#endif
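/*
 * Illustrative note: in 64-bit mode most instructions taking a 32-bit
 * immediate sign extend it to 64 bits (e.g. REX.W ADD rAX, imm32), which is
 * exactly what the S32_SX_U64 fetcher provides.  Hedged snippet, same build
 * assumptions as the earlier examples:
 *
 * @code
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); // 0x80000000 becomes UINT64_C(0xffffffff80000000)
 * @endcode
 */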
2856
2857#ifndef IEM_WITH_SETJMP
2858
2859/**
2860 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2861 *
2862 * @returns Strict VBox status code.
2863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2864 * @param pu64 Where to return the opcode qword.
2865 */
2866DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2867{
2868 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2869 if (rcStrict == VINF_SUCCESS)
2870 {
2871 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2872# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2873 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2874# else
2875 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2876 pVCpu->iem.s.abOpcode[offOpcode + 1],
2877 pVCpu->iem.s.abOpcode[offOpcode + 2],
2878 pVCpu->iem.s.abOpcode[offOpcode + 3],
2879 pVCpu->iem.s.abOpcode[offOpcode + 4],
2880 pVCpu->iem.s.abOpcode[offOpcode + 5],
2881 pVCpu->iem.s.abOpcode[offOpcode + 6],
2882 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2883# endif
2884 pVCpu->iem.s.offOpcode = offOpcode + 8;
2885 }
2886 else
2887 *pu64 = 0;
2888 return rcStrict;
2889}
2890
2891
2892/**
2893 * Fetches the next opcode qword.
2894 *
2895 * @returns Strict VBox status code.
2896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2897 * @param pu64 Where to return the opcode qword.
2898 */
2899DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2900{
2901 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2902 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2903 {
2904# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2905 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2906# else
2907 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2908 pVCpu->iem.s.abOpcode[offOpcode + 1],
2909 pVCpu->iem.s.abOpcode[offOpcode + 2],
2910 pVCpu->iem.s.abOpcode[offOpcode + 3],
2911 pVCpu->iem.s.abOpcode[offOpcode + 4],
2912 pVCpu->iem.s.abOpcode[offOpcode + 5],
2913 pVCpu->iem.s.abOpcode[offOpcode + 6],
2914 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2915# endif
2916 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2917 return VINF_SUCCESS;
2918 }
2919 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2920}
2921
2922#else /* IEM_WITH_SETJMP */
2923
2924/**
2925 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2926 *
2927 * @returns The opcode qword.
2928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2929 */
2930DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2931{
2932# ifdef IEM_WITH_CODE_TLB
2933 uint64_t u64;
2934 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2935 return u64;
2936# else
2937 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2938 if (rcStrict == VINF_SUCCESS)
2939 {
2940 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2941 pVCpu->iem.s.offOpcode = offOpcode + 8;
2942# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2943 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2944# else
2945 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2946 pVCpu->iem.s.abOpcode[offOpcode + 1],
2947 pVCpu->iem.s.abOpcode[offOpcode + 2],
2948 pVCpu->iem.s.abOpcode[offOpcode + 3],
2949 pVCpu->iem.s.abOpcode[offOpcode + 4],
2950 pVCpu->iem.s.abOpcode[offOpcode + 5],
2951 pVCpu->iem.s.abOpcode[offOpcode + 6],
2952 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2953# endif
2954 }
2955 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2956# endif
2957}
2958
2959
2960/**
2961 * Fetches the next opcode qword, longjmp on error.
2962 *
2963 * @returns The opcode qword.
2964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2965 */
2966DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2967{
2968# ifdef IEM_WITH_CODE_TLB
2969 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2970 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2971 if (RT_LIKELY( pbBuf != NULL
2972 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2973 {
2974 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2975# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2976 return *(uint64_t const *)&pbBuf[offBuf];
2977# else
2978 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2979 pbBuf[offBuf + 1],
2980 pbBuf[offBuf + 2],
2981 pbBuf[offBuf + 3],
2982 pbBuf[offBuf + 4],
2983 pbBuf[offBuf + 5],
2984 pbBuf[offBuf + 6],
2985 pbBuf[offBuf + 7]);
2986# endif
2987 }
2988# else
2989 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2990 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2991 {
2992 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2993# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2994 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2995# else
2996 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2997 pVCpu->iem.s.abOpcode[offOpcode + 1],
2998 pVCpu->iem.s.abOpcode[offOpcode + 2],
2999 pVCpu->iem.s.abOpcode[offOpcode + 3],
3000 pVCpu->iem.s.abOpcode[offOpcode + 4],
3001 pVCpu->iem.s.abOpcode[offOpcode + 5],
3002 pVCpu->iem.s.abOpcode[offOpcode + 6],
3003 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3004# endif
3005 }
3006# endif
3007 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3008}
3009
3010#endif /* IEM_WITH_SETJMP */
3011
3012/**
3013 * Fetches the next opcode quad word, returns automatically on failure.
3014 *
3015 * @param a_pu64 Where to return the opcode quad word.
3016 * @remark Implicitly references pVCpu.
3017 */
3018#ifndef IEM_WITH_SETJMP
3019# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3020 do \
3021 { \
3022 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3023 if (rcStrict2 != VINF_SUCCESS) \
3024 return rcStrict2; \
3025 } while (0)
3026#else
3027# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3028#endif
3029
3030
3031/** @name Misc Worker Functions.
3032 * @{
3033 */
3034
3035
3036/**
3037 * Validates a new SS segment.
3038 *
3039 * @returns VBox strict status code.
3040 * @param pVCpu The cross context virtual CPU structure of the
3041 * calling thread.
3042 * @param pCtx The CPU context.
3043 * @param NewSS The new SS selector.
3044 * @param uCpl The CPL to load the stack for.
3045 * @param pDesc Where to return the descriptor.
3046 */
3047IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3048{
3049 NOREF(pCtx);
3050
3051 /* Null selectors are not allowed (we're not called for dispatching
3052 interrupts with SS=0 in long mode). */
3053 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3054 {
3055 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3056 return iemRaiseTaskSwitchFault0(pVCpu);
3057 }
3058
3059 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3060 if ((NewSS & X86_SEL_RPL) != uCpl)
3061 {
3062 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3063 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3064 }
3065
3066 /*
3067 * Read the descriptor.
3068 */
3069 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3070 if (rcStrict != VINF_SUCCESS)
3071 return rcStrict;
3072
3073 /*
3074 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3075 */
3076 if (!pDesc->Legacy.Gen.u1DescType)
3077 {
3078 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3079 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3080 }
3081
3082 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3083 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3084 {
3085 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3086 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3087 }
3088 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3089 {
3090 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3091 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3092 }
3093
3094 /* Is it there? */
3095 /** @todo testcase: Is this checked before the canonical / limit check below? */
3096 if (!pDesc->Legacy.Gen.u1Present)
3097 {
3098 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3099 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3100 }
3101
3102 return VINF_SUCCESS;
3103}
3104
3105
3106/**
3107 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3108 * not.
3109 *
3110 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3111 * @param a_pCtx The CPU context.
3112 */
3113#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3114# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3115 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3116 ? (a_pCtx)->eflags.u \
3117 : CPUMRawGetEFlags(a_pVCpu) )
3118#else
3119# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3120 ( (a_pCtx)->eflags.u )
3121#endif
3122
3123/**
3124 * Updates the EFLAGS in the correct manner wrt. PATM.
3125 *
3126 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3127 * @param a_pCtx The CPU context.
3128 * @param a_fEfl The new EFLAGS.
3129 */
3130#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3131# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3132 do { \
3133 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3134 (a_pCtx)->eflags.u = (a_fEfl); \
3135 else \
3136 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3137 } while (0)
3138#else
3139# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3140 do { \
3141 (a_pCtx)->eflags.u = (a_fEfl); \
3142 } while (0)
3143#endif
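/*
 * Illustrative sketch: guest EFLAGS read-modify-write sequences should go
 * through the two wrappers above so raw-mode (PATM) gets a say.  Hypothetical
 * snippet, assuming pVCpu and pCtx are in scope:
 *
 * @code
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);   // PATM-aware read
 *     fEfl &= ~X86_EFL_IF;                            // e.g. CLI-like behaviour
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);             // PATM-aware write-back
 * @endcode
 */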
3144
3145
3146/** @} */
3147
3148/** @name Raising Exceptions.
3149 *
3150 * @{
3151 */
3152
3153/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3154 * @{ */
3155/** CPU exception. */
3156#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3157/** External interrupt (from PIC, APIC, whatever). */
3158#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3159/** Software interrupt (INT n or INTO, but not BOUND).
3160 * Returns to the following instruction. */
3161#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3162/** Takes an error code. */
3163#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3164/** Takes a CR2. */
3165#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3166/** Generated by the breakpoint instruction. */
3167#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3168/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3169#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3170/** @} */
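
/* For orientation, the combinations used by the raise helpers elsewhere in
 * this file look roughly like this (a sketch, not an exhaustive list):
 *      \#GP:               IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR
 *      \#PF:               IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 *      INT n:              IEM_XCPT_FLAGS_T_SOFT_INT
 *      PIC/APIC interrupt: IEM_XCPT_FLAGS_T_EXT_INT
 */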
3171
3172
3173/**
3174 * Loads the specified stack far pointer from the TSS.
3175 *
3176 * @returns VBox strict status code.
3177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3178 * @param pCtx The CPU context.
3179 * @param uCpl The CPL to load the stack for.
3180 * @param pSelSS Where to return the new stack segment.
3181 * @param puEsp Where to return the new stack pointer.
3182 */
3183IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3184 PRTSEL pSelSS, uint32_t *puEsp)
3185{
3186 VBOXSTRICTRC rcStrict;
3187 Assert(uCpl < 4);
3188
3189 switch (pCtx->tr.Attr.n.u4Type)
3190 {
3191 /*
3192 * 16-bit TSS (X86TSS16).
3193 */
3194 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
3195 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3196 {
3197 uint32_t off = uCpl * 4 + 2;
3198 if (off + 4 <= pCtx->tr.u32Limit)
3199 {
3200 /** @todo check actual access pattern here. */
3201 uint32_t u32Tmp = 0; /* silence gcc 'maybe used uninitialized' warning */
3202 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3203 if (rcStrict == VINF_SUCCESS)
3204 {
3205 *puEsp = RT_LOWORD(u32Tmp);
3206 *pSelSS = RT_HIWORD(u32Tmp);
3207 return VINF_SUCCESS;
3208 }
3209 }
3210 else
3211 {
3212 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3213 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3214 }
3215 break;
3216 }
3217
3218 /*
3219 * 32-bit TSS (X86TSS32).
3220 */
3221 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
3222 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3223 {
3224 uint32_t off = uCpl * 8 + 4;
3225 if (off + 7 <= pCtx->tr.u32Limit)
3226 {
3227/** @todo check actual access pattern here. */
3228 uint64_t u64Tmp;
3229 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3230 if (rcStrict == VINF_SUCCESS)
3231 {
3232 *puEsp = u64Tmp & UINT32_MAX;
3233 *pSelSS = (RTSEL)(u64Tmp >> 32);
3234 return VINF_SUCCESS;
3235 }
3236 }
3237 else
3238 {
3239 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3240 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3241 }
3242 break;
3243 }
3244
3245 default:
3246 AssertFailed();
3247 rcStrict = VERR_IEM_IPE_4;
3248 break;
3249 }
3250
3251 *puEsp = 0; /* make gcc happy */
3252 *pSelSS = 0; /* make gcc happy */
3253 return rcStrict;
3254}
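
/* The offsets used above follow directly from the TSS layouts: in a 32-bit TSS
 * the ring stacks start at offset 4 as esp0/ss0, esp1/ss1, esp2/ss2 (an ESPn
 * dword followed by the SSn selector, 8 bytes per ring), giving off = uCpl * 8 + 4;
 * in a 16-bit TSS they start at offset 2 as sp0/ss0 etc. (4 bytes per ring),
 * giving off = uCpl * 4 + 2. Worked example for uCpl = 1:
 *      32-bit TSS: off = 1*8 + 4 = 0x0C  -> esp1 (dword), ss1 at 0x10
 *      16-bit TSS: off = 1*4 + 2 = 0x06  -> sp1 (word),   ss1 at 0x08
 */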
3255
3256
3257/**
3258 * Loads the specified stack pointer from the 64-bit TSS.
3259 *
3260 * @returns VBox strict status code.
3261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3262 * @param pCtx The CPU context.
3263 * @param uCpl The CPL to load the stack for.
3264 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3265 * @param puRsp Where to return the new stack pointer.
3266 */
3267IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3268{
3269 Assert(uCpl < 4);
3270 Assert(uIst < 8);
3271 *puRsp = 0; /* make gcc happy */
3272
3273 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3274
3275 uint32_t off;
3276 if (uIst)
3277 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3278 else
3279 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3280 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3281 {
3282 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3283 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3284 }
3285
3286 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3287}
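
/* Same idea for the 64-bit TSS: after the reserved dword at offset 0 come
 * rsp0/rsp1/rsp2 at offsets 0x04/0x0C/0x14, eight reserved bytes, and then
 * ist1..ist7 at 0x24..0x54 (the standard layout X86TSS64 mirrors). Examples:
 *      uIst = 0, uCpl = 2  ->  off = 2*8 + 0x04 = 0x14  (rsp2)
 *      uIst = 3            ->  off = (3-1)*8 + 0x24 = 0x34  (ist3)
 */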
3288
3289
3290/**
3291 * Adjust the CPU state according to the exception being raised.
3292 *
3293 * @param pCtx The CPU context.
3294 * @param u8Vector The exception that has been raised.
3295 */
3296DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3297{
3298 switch (u8Vector)
3299 {
3300 case X86_XCPT_DB:
3301 pCtx->dr[7] &= ~X86_DR7_GD;
3302 break;
3303 /** @todo Read the AMD and Intel exception reference... */
3304 }
3305}
3306
3307
3308/**
3309 * Implements exceptions and interrupts for real mode.
3310 *
3311 * @returns VBox strict status code.
3312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3313 * @param pCtx The CPU context.
3314 * @param cbInstr The number of bytes to offset rIP by in the return
3315 * address.
3316 * @param u8Vector The interrupt / exception vector number.
3317 * @param fFlags The flags.
3318 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3319 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3320 */
3321IEM_STATIC VBOXSTRICTRC
3322iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3323 PCPUMCTX pCtx,
3324 uint8_t cbInstr,
3325 uint8_t u8Vector,
3326 uint32_t fFlags,
3327 uint16_t uErr,
3328 uint64_t uCr2)
3329{
3330 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3331 NOREF(uErr); NOREF(uCr2);
3332
3333 /*
3334 * Read the IDT entry.
3335 */
3336 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3337 {
3338 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3339 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3340 }
3341 RTFAR16 Idte;
3342 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3343 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3344 return rcStrict;
3345
3346 /*
3347 * Push the stack frame.
3348 */
3349 uint16_t *pu16Frame;
3350 uint64_t uNewRsp;
3351 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3352 if (rcStrict != VINF_SUCCESS)
3353 return rcStrict;
3354
3355 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3356#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3357 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3358 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3359 fEfl |= UINT16_C(0xf000);
3360#endif
3361 pu16Frame[2] = (uint16_t)fEfl;
3362 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3363 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3364 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3365 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3366 return rcStrict;
3367
3368 /*
3369 * Load the vector address into cs:ip and make exception specific state
3370 * adjustments.
3371 */
3372 pCtx->cs.Sel = Idte.sel;
3373 pCtx->cs.ValidSel = Idte.sel;
3374 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3375 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3376 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3377 pCtx->rip = Idte.off;
3378 fEfl &= ~X86_EFL_IF;
3379 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3380
3381 /** @todo do we actually do this in real mode? */
3382 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3383 iemRaiseXcptAdjustState(pCtx, u8Vector);
3384
3385 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3386}
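
/* As a concrete sketch of the real-mode path above: each IVT entry is four
 * bytes, offset in the low word and segment in the high word, located at
 * IDTR.base + 4 * vector; the 6-byte frame pushed holds IP (or IP + cbInstr
 * for software interrupts) at the lowest address, then CS, then FLAGS.  E.g.
 * with IDTR.base = 0, vector 0x21 and an IVT entry of 5678:1234, execution
 * continues at CS=0x5678 (base 0x56780), IP=0x1234 with IF cleared. */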
3387
3388
3389/**
3390 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3391 *
3392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3393 * @param pSReg Pointer to the segment register.
3394 */
3395IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3396{
3397 pSReg->Sel = 0;
3398 pSReg->ValidSel = 0;
3399 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3400 {
3401 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes. */
3402 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3403 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3404 }
3405 else
3406 {
3407 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3408 /** @todo check this on AMD-V */
3409 pSReg->u64Base = 0;
3410 pSReg->u32Limit = 0;
3411 }
3412}
3413
3414
3415/**
3416 * Loads a segment selector during a task switch in V8086 mode.
3417 *
3418 * @param pSReg Pointer to the segment register.
3419 * @param uSel The selector value to load.
3420 */
3421IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3422{
3423 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3424 pSReg->Sel = uSel;
3425 pSReg->ValidSel = uSel;
3426 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3427 pSReg->u64Base = uSel << 4;
3428 pSReg->u32Limit = 0xffff;
3429 pSReg->Attr.u = 0xf3;
3430}
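
/* Worked example: uSel = 0x1234 gives base 0x12340, limit 0xffff and
 * attributes 0xf3, i.e. a present, DPL=3, accessed read/write data segment -
 * the fixed real-mode-style shape the guest segment-register checks cited
 * above (Intel spec. 26.3.1.2) expect for V8086 segments. */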
3431
3432
3433/**
3434 * Loads a NULL data selector into a selector register, both the hidden and
3435 * visible parts, in protected mode.
3436 *
3437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3438 * @param pSReg Pointer to the segment register.
3439 * @param uRpl The RPL.
3440 */
3441IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3442{
3443 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3444 * data selector in protected mode. */
3445 pSReg->Sel = uRpl;
3446 pSReg->ValidSel = uRpl;
3447 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3448 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3449 {
3450 /* VT-x (Intel 3960x) observed doing something like this. */
3451 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3452 pSReg->u32Limit = UINT32_MAX;
3453 pSReg->u64Base = 0;
3454 }
3455 else
3456 {
3457 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3458 pSReg->u32Limit = 0;
3459 pSReg->u64Base = 0;
3460 }
3461}
3462
3463
3464/**
3465 * Loads a segment selector during a task switch in protected mode.
3466 *
3467 * In this task switch scenario, we would throw \#TS exceptions rather than
3468 * \#GPs.
3469 *
3470 * @returns VBox strict status code.
3471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3472 * @param pSReg Pointer to the segment register.
3473 * @param uSel The new selector value.
3474 *
3475 * @remarks This does _not_ handle CS or SS.
3476 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3477 */
3478IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3479{
3480 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3481
3482 /* Null data selector. */
3483 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3484 {
3485 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3486 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3487 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3488 return VINF_SUCCESS;
3489 }
3490
3491 /* Fetch the descriptor. */
3492 IEMSELDESC Desc;
3493 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3494 if (rcStrict != VINF_SUCCESS)
3495 {
3496 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3497 VBOXSTRICTRC_VAL(rcStrict)));
3498 return rcStrict;
3499 }
3500
3501 /* Must be a data segment or readable code segment. */
3502 if ( !Desc.Legacy.Gen.u1DescType
3503 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3504 {
3505 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3506 Desc.Legacy.Gen.u4Type));
3507 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3508 }
3509
3510 /* Check privileges for data segments and non-conforming code segments. */
3511 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3512 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3513 {
3514 /* The RPL and the new CPL must be less than or equal to the DPL. */
3515 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3516 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3517 {
3518 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3519 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3520 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3521 }
3522 }
3523
3524 /* Is it there? */
3525 if (!Desc.Legacy.Gen.u1Present)
3526 {
3527 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3528 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3529 }
3530
3531 /* The base and limit. */
3532 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3533 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3534
3535 /*
3536 * Ok, everything checked out fine. Now set the accessed bit before
3537 * committing the result into the registers.
3538 */
3539 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3540 {
3541 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3542 if (rcStrict != VINF_SUCCESS)
3543 return rcStrict;
3544 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3545 }
3546
3547 /* Commit */
3548 pSReg->Sel = uSel;
3549 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3550 pSReg->u32Limit = cbLimit;
3551 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3552 pSReg->ValidSel = uSel;
3553 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3554 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3555 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3556
3557 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3558 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3559 return VINF_SUCCESS;
3560}
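
/* Privilege check illustration for the function above (non-conforming case):
 * with a descriptor DPL of 2, a selector RPL of 3 or a CPL of 3 trips the
 * check and yields #TS(sel & ~RPL), while RPL <= 2 and CPL <= 2 pass; a
 * conforming code segment skips the RPL/CPL test entirely. */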
3561
3562
3563/**
3564 * Performs a task switch.
3565 *
3566 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3567 * caller is responsible for performing the necessary checks (like DPL, TSS
3568 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3569 * reference for JMP, CALL, IRET.
3570 *
3571 * If the task switch is due to a software interrupt or hardware exception,
3572 * the caller is responsible for validating the TSS selector and descriptor. See
3573 * Intel Instruction reference for INT n.
3574 *
3575 * @returns VBox strict status code.
3576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3577 * @param pCtx The CPU context.
3578 * @param enmTaskSwitch What caused this task switch.
3579 * @param uNextEip The EIP effective after the task switch.
3580 * @param fFlags The flags.
3581 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3582 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3583 * @param SelTSS The TSS selector of the new task.
3584 * @param pNewDescTSS Pointer to the new TSS descriptor.
3585 */
3586IEM_STATIC VBOXSTRICTRC
3587iemTaskSwitch(PVMCPU pVCpu,
3588 PCPUMCTX pCtx,
3589 IEMTASKSWITCH enmTaskSwitch,
3590 uint32_t uNextEip,
3591 uint32_t fFlags,
3592 uint16_t uErr,
3593 uint64_t uCr2,
3594 RTSEL SelTSS,
3595 PIEMSELDESC pNewDescTSS)
3596{
3597 Assert(!IEM_IS_REAL_MODE(pVCpu));
3598 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3599
3600 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3601 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3602 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3603 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3604 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3605
3606 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3607 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3608
3609 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3610 fIsNewTSS386, pCtx->eip, uNextEip));
3611
3612 /* Update CR2 in case it's a page-fault. */
3613 /** @todo This should probably be done much earlier in IEM/PGM. See
3614 * @bugref{5653#c49}. */
3615 if (fFlags & IEM_XCPT_FLAGS_CR2)
3616 pCtx->cr2 = uCr2;
3617
3618 /*
3619 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3620 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3621 */
3622 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3623 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3624 if (uNewTSSLimit < uNewTSSLimitMin)
3625 {
3626 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3627 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3628 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3629 }
3630
3631 /*
3632 * Check the current TSS limit. The last field written to the current TSS during the
3633 * task switch is 2 bytes at offset 0x5C (32-bit) and 2 bytes at offset 0x28 (16-bit).
3634 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3635 *
3636 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3637 * end up with smaller than "legal" TSS limits.
3638 */
3639 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3640 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3641 if (uCurTSSLimit < uCurTSSLimitMin)
3642 {
3643 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3644 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3645 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3646 }
3647
3648 /*
3649 * Verify that the new TSS can be accessed and map it. Map only the required contents
3650 * and not the entire TSS.
3651 */
3652 void *pvNewTSS;
3653 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3654 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3655 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3656 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3657 * not perform correct translation if this happens. See Intel spec. 7.2.1
3658 * "Task-State Segment" */
3659 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3660 if (rcStrict != VINF_SUCCESS)
3661 {
3662 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3663 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3664 return rcStrict;
3665 }
3666
3667 /*
3668 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3669 */
3670 uint32_t u32EFlags = pCtx->eflags.u32;
3671 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3672 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3673 {
3674 PX86DESC pDescCurTSS;
3675 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3676 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3677 if (rcStrict != VINF_SUCCESS)
3678 {
3679 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3680 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3681 return rcStrict;
3682 }
3683
3684 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3685 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3686 if (rcStrict != VINF_SUCCESS)
3687 {
3688 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3689 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3690 return rcStrict;
3691 }
3692
3693 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3694 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3695 {
3696 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3697 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3698 u32EFlags &= ~X86_EFL_NT;
3699 }
3700 }
3701
3702 /*
3703 * Save the CPU state into the current TSS.
3704 */
3705 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3706 if (GCPtrNewTSS == GCPtrCurTSS)
3707 {
3708 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3709 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3710 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3711 }
3712 if (fIsNewTSS386)
3713 {
3714 /*
3715 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3716 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3717 */
3718 void *pvCurTSS32;
3719 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3720 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3721 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3722 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3723 if (rcStrict != VINF_SUCCESS)
3724 {
3725 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3726 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3727 return rcStrict;
3728 }
3729
3730 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3731 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3732 pCurTSS32->eip = uNextEip;
3733 pCurTSS32->eflags = u32EFlags;
3734 pCurTSS32->eax = pCtx->eax;
3735 pCurTSS32->ecx = pCtx->ecx;
3736 pCurTSS32->edx = pCtx->edx;
3737 pCurTSS32->ebx = pCtx->ebx;
3738 pCurTSS32->esp = pCtx->esp;
3739 pCurTSS32->ebp = pCtx->ebp;
3740 pCurTSS32->esi = pCtx->esi;
3741 pCurTSS32->edi = pCtx->edi;
3742 pCurTSS32->es = pCtx->es.Sel;
3743 pCurTSS32->cs = pCtx->cs.Sel;
3744 pCurTSS32->ss = pCtx->ss.Sel;
3745 pCurTSS32->ds = pCtx->ds.Sel;
3746 pCurTSS32->fs = pCtx->fs.Sel;
3747 pCurTSS32->gs = pCtx->gs.Sel;
3748
3749 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3750 if (rcStrict != VINF_SUCCESS)
3751 {
3752 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3753 VBOXSTRICTRC_VAL(rcStrict)));
3754 return rcStrict;
3755 }
3756 }
3757 else
3758 {
3759 /*
3760 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3761 */
3762 void *pvCurTSS16;
3763 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3764 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3765 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3766 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3767 if (rcStrict != VINF_SUCCESS)
3768 {
3769 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3770 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3771 return rcStrict;
3772 }
3773
3774 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3775 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3776 pCurTSS16->ip = uNextEip;
3777 pCurTSS16->flags = u32EFlags;
3778 pCurTSS16->ax = pCtx->ax;
3779 pCurTSS16->cx = pCtx->cx;
3780 pCurTSS16->dx = pCtx->dx;
3781 pCurTSS16->bx = pCtx->bx;
3782 pCurTSS16->sp = pCtx->sp;
3783 pCurTSS16->bp = pCtx->bp;
3784 pCurTSS16->si = pCtx->si;
3785 pCurTSS16->di = pCtx->di;
3786 pCurTSS16->es = pCtx->es.Sel;
3787 pCurTSS16->cs = pCtx->cs.Sel;
3788 pCurTSS16->ss = pCtx->ss.Sel;
3789 pCurTSS16->ds = pCtx->ds.Sel;
3790
3791 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3792 if (rcStrict != VINF_SUCCESS)
3793 {
3794 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3795 VBOXSTRICTRC_VAL(rcStrict)));
3796 return rcStrict;
3797 }
3798 }
3799
3800 /*
3801 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3802 */
3803 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3804 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3805 {
3806 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3807 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3808 pNewTSS->selPrev = pCtx->tr.Sel;
3809 }
3810
3811 /*
3812 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3813 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3814 */
3815 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3816 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3817 bool fNewDebugTrap;
3818 if (fIsNewTSS386)
3819 {
3820 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3821 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3822 uNewEip = pNewTSS32->eip;
3823 uNewEflags = pNewTSS32->eflags;
3824 uNewEax = pNewTSS32->eax;
3825 uNewEcx = pNewTSS32->ecx;
3826 uNewEdx = pNewTSS32->edx;
3827 uNewEbx = pNewTSS32->ebx;
3828 uNewEsp = pNewTSS32->esp;
3829 uNewEbp = pNewTSS32->ebp;
3830 uNewEsi = pNewTSS32->esi;
3831 uNewEdi = pNewTSS32->edi;
3832 uNewES = pNewTSS32->es;
3833 uNewCS = pNewTSS32->cs;
3834 uNewSS = pNewTSS32->ss;
3835 uNewDS = pNewTSS32->ds;
3836 uNewFS = pNewTSS32->fs;
3837 uNewGS = pNewTSS32->gs;
3838 uNewLdt = pNewTSS32->selLdt;
3839 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3840 }
3841 else
3842 {
3843 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3844 uNewCr3 = 0;
3845 uNewEip = pNewTSS16->ip;
3846 uNewEflags = pNewTSS16->flags;
3847 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3848 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3849 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3850 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3851 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3852 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3853 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3854 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3855 uNewES = pNewTSS16->es;
3856 uNewCS = pNewTSS16->cs;
3857 uNewSS = pNewTSS16->ss;
3858 uNewDS = pNewTSS16->ds;
3859 uNewFS = 0;
3860 uNewGS = 0;
3861 uNewLdt = pNewTSS16->selLdt;
3862 fNewDebugTrap = false;
3863 }
3864
3865 if (GCPtrNewTSS == GCPtrCurTSS)
3866 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3867 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3868
3869 /*
3870 * We're done accessing the new TSS.
3871 */
3872 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3873 if (rcStrict != VINF_SUCCESS)
3874 {
3875 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3876 return rcStrict;
3877 }
3878
3879 /*
3880 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3881 */
3882 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3883 {
3884 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3885 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3886 if (rcStrict != VINF_SUCCESS)
3887 {
3888 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3889 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3890 return rcStrict;
3891 }
3892
3893 /* Check that the descriptor indicates the new TSS is available (not busy). */
3894 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3895 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3896 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3897
3898 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3899 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3900 if (rcStrict != VINF_SUCCESS)
3901 {
3902 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3903 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3904 return rcStrict;
3905 }
3906 }
3907
3908 /*
3909 * From this point on, we're technically in the new task. Exceptions raised while completing
3910 * the switch are deferred until it is done, but delivered before executing any instruction in the new task.
3911 */
3912 pCtx->tr.Sel = SelTSS;
3913 pCtx->tr.ValidSel = SelTSS;
3914 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3915 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3916 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3917 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3918 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3919
3920 /* Set the busy bit in TR. */
3921 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3922 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3923 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3924 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3925 {
3926 uNewEflags |= X86_EFL_NT;
3927 }
3928
3929 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3930 pCtx->cr0 |= X86_CR0_TS;
3931 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3932
3933 pCtx->eip = uNewEip;
3934 pCtx->eax = uNewEax;
3935 pCtx->ecx = uNewEcx;
3936 pCtx->edx = uNewEdx;
3937 pCtx->ebx = uNewEbx;
3938 pCtx->esp = uNewEsp;
3939 pCtx->ebp = uNewEbp;
3940 pCtx->esi = uNewEsi;
3941 pCtx->edi = uNewEdi;
3942
3943 uNewEflags &= X86_EFL_LIVE_MASK;
3944 uNewEflags |= X86_EFL_RA1_MASK;
3945 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3946
3947 /*
3948 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3949 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3950 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3951 */
3952 pCtx->es.Sel = uNewES;
3953 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3954
3955 pCtx->cs.Sel = uNewCS;
3956 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3957
3958 pCtx->ss.Sel = uNewSS;
3959 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3960
3961 pCtx->ds.Sel = uNewDS;
3962 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3963
3964 pCtx->fs.Sel = uNewFS;
3965 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3966
3967 pCtx->gs.Sel = uNewGS;
3968 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3969 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3970
3971 pCtx->ldtr.Sel = uNewLdt;
3972 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3973 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3974 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3975
3976 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3977 {
3978 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3979 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3980 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3981 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3982 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3983 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3984 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3985 }
3986
3987 /*
3988 * Switch CR3 for the new task.
3989 */
3990 if ( fIsNewTSS386
3991 && (pCtx->cr0 & X86_CR0_PG))
3992 {
3993 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3994 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3995 {
3996 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3997 AssertRCSuccessReturn(rc, rc);
3998 }
3999 else
4000 pCtx->cr3 = uNewCr3;
4001
4002 /* Inform PGM. */
4003 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4004 {
4005 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4006 AssertRCReturn(rc, rc);
4007 /* ignore informational status codes */
4008 }
4009 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4010 }
4011
4012 /*
4013 * Switch LDTR for the new task.
4014 */
4015 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4016 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4017 else
4018 {
4019 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4020
4021 IEMSELDESC DescNewLdt;
4022 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4023 if (rcStrict != VINF_SUCCESS)
4024 {
4025 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4026 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4027 return rcStrict;
4028 }
4029 if ( !DescNewLdt.Legacy.Gen.u1Present
4030 || DescNewLdt.Legacy.Gen.u1DescType
4031 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4032 {
4033 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4034 uNewLdt, DescNewLdt.Legacy.u));
4035 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4036 }
4037
4038 pCtx->ldtr.ValidSel = uNewLdt;
4039 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4040 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4041 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4042 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4043 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4044 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4046 }
4047
4048 IEMSELDESC DescSS;
4049 if (IEM_IS_V86_MODE(pVCpu))
4050 {
4051 pVCpu->iem.s.uCpl = 3;
4052 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4053 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4054 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4055 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4056 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4057 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4058
4059 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4060 DescSS.Legacy.u = 0;
4061 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4062 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4063 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4064 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4065 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4066 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4067 DescSS.Legacy.Gen.u2Dpl = 3;
4068 }
4069 else
4070 {
4071 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4072
4073 /*
4074 * Load the stack segment for the new task.
4075 */
4076 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4077 {
4078 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4079 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4080 }
4081
4082 /* Fetch the descriptor. */
4083 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4084 if (rcStrict != VINF_SUCCESS)
4085 {
4086 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4087 VBOXSTRICTRC_VAL(rcStrict)));
4088 return rcStrict;
4089 }
4090
4091 /* SS must be a data segment and writable. */
4092 if ( !DescSS.Legacy.Gen.u1DescType
4093 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4094 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4095 {
4096 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4097 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4098 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4099 }
4100
4101 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4102 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4103 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4104 {
4105 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4106 uNewCpl));
4107 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4108 }
4109
4110 /* Is it there? */
4111 if (!DescSS.Legacy.Gen.u1Present)
4112 {
4113 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4114 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4115 }
4116
4117 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4118 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4119
4120 /* Set the accessed bit before committing the result into SS. */
4121 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4122 {
4123 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4124 if (rcStrict != VINF_SUCCESS)
4125 return rcStrict;
4126 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4127 }
4128
4129 /* Commit SS. */
4130 pCtx->ss.Sel = uNewSS;
4131 pCtx->ss.ValidSel = uNewSS;
4132 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4133 pCtx->ss.u32Limit = cbLimit;
4134 pCtx->ss.u64Base = u64Base;
4135 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4136 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4137
4138 /* CPL has changed, update IEM before loading rest of segments. */
4139 pVCpu->iem.s.uCpl = uNewCpl;
4140
4141 /*
4142 * Load the data segments for the new task.
4143 */
4144 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4145 if (rcStrict != VINF_SUCCESS)
4146 return rcStrict;
4147 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4148 if (rcStrict != VINF_SUCCESS)
4149 return rcStrict;
4150 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4151 if (rcStrict != VINF_SUCCESS)
4152 return rcStrict;
4153 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4154 if (rcStrict != VINF_SUCCESS)
4155 return rcStrict;
4156
4157 /*
4158 * Load the code segment for the new task.
4159 */
4160 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4161 {
4162 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4163 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4164 }
4165
4166 /* Fetch the descriptor. */
4167 IEMSELDESC DescCS;
4168 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4169 if (rcStrict != VINF_SUCCESS)
4170 {
4171 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4172 return rcStrict;
4173 }
4174
4175 /* CS must be a code segment. */
4176 if ( !DescCS.Legacy.Gen.u1DescType
4177 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4178 {
4179 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4180 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4181 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4182 }
4183
4184 /* For conforming CS, DPL must be less than or equal to the RPL. */
4185 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4186 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4187 {
4188 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4189 DescCS.Legacy.Gen.u2Dpl));
4190 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4191 }
4192
4193 /* For non-conforming CS, DPL must match RPL. */
4194 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4195 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4196 {
4197 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4198 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4199 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4200 }
4201
4202 /* Is it there? */
4203 if (!DescCS.Legacy.Gen.u1Present)
4204 {
4205 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4206 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4207 }
4208
4209 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4210 u64Base = X86DESC_BASE(&DescCS.Legacy);
4211
4212 /* Set the accessed bit before committing the result into CS. */
4213 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4214 {
4215 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4216 if (rcStrict != VINF_SUCCESS)
4217 return rcStrict;
4218 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4219 }
4220
4221 /* Commit CS. */
4222 pCtx->cs.Sel = uNewCS;
4223 pCtx->cs.ValidSel = uNewCS;
4224 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4225 pCtx->cs.u32Limit = cbLimit;
4226 pCtx->cs.u64Base = u64Base;
4227 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4229 }
4230
4231 /** @todo Debug trap. */
4232 if (fIsNewTSS386 && fNewDebugTrap)
4233 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4234
4235 /*
4236 * Construct the error code masks based on what caused this task switch.
4237 * See Intel Instruction reference for INT.
4238 */
4239 uint16_t uExt;
4240 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4241 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4242 {
4243 uExt = 1;
4244 }
4245 else
4246 uExt = 0;
4247
4248 /*
4249 * Push any error code on to the new stack.
4250 */
4251 if (fFlags & IEM_XCPT_FLAGS_ERR)
4252 {
4253 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4254 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4255 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4256
4257 /* Check that there is sufficient space on the stack. */
4258 /** @todo Factor out segment limit checking for normal/expand down segments
4259 * into a separate function. */
4260 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4261 {
4262 if ( pCtx->esp - 1 > cbLimitSS
4263 || pCtx->esp < cbStackFrame)
4264 {
4265 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4266 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4267 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4268 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4269 }
4270 }
4271 else
4272 {
4273 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4274 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4275 {
4276 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4277 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4278 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4279 }
4280 }
4281
4282
4283 if (fIsNewTSS386)
4284 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4285 else
4286 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4287 if (rcStrict != VINF_SUCCESS)
4288 {
4289 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4290 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4291 return rcStrict;
4292 }
4293 }
4294
4295 /* Check the new EIP against the new CS limit. */
4296 if (pCtx->eip > pCtx->cs.u32Limit)
4297 {
4298 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4299 pCtx->eip, pCtx->cs.u32Limit));
4300 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4301 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4302 }
4303
4304 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4305 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4306}
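
/* For reference, the task-gate path in iemRaiseXcptOrIntInProtMode below
 * invokes this function as follows (JMP/CALL/IRET callers perform their own
 * DPL/present checks first, as noted in the function description):
 *
 *      return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip,
 *                           fFlags, uErr, uCr2, SelTSS, &DescTSS);
 */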
4307
4308
4309/**
4310 * Implements exceptions and interrupts for protected mode.
4311 *
4312 * @returns VBox strict status code.
4313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4314 * @param pCtx The CPU context.
4315 * @param cbInstr The number of bytes to offset rIP by in the return
4316 * address.
4317 * @param u8Vector The interrupt / exception vector number.
4318 * @param fFlags The flags.
4319 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4320 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4321 */
4322IEM_STATIC VBOXSTRICTRC
4323iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4324 PCPUMCTX pCtx,
4325 uint8_t cbInstr,
4326 uint8_t u8Vector,
4327 uint32_t fFlags,
4328 uint16_t uErr,
4329 uint64_t uCr2)
4330{
4331 /*
4332 * Read the IDT entry.
4333 */
4334 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4335 {
4336 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4337 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4338 }
4339 X86DESC Idte;
4340 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4341 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4342 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4343 return rcStrict;
4344 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4345 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4346 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4347
4348 /*
4349 * Check the descriptor type, DPL and such.
4350 * ASSUMES this is done in the same order as described for call-gate calls.
4351 */
4352 if (Idte.Gate.u1DescType)
4353 {
4354 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4355 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4356 }
4357 bool fTaskGate = false;
4358 uint8_t f32BitGate = true;
4359 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4360 switch (Idte.Gate.u4Type)
4361 {
4362 case X86_SEL_TYPE_SYS_UNDEFINED:
4363 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4364 case X86_SEL_TYPE_SYS_LDT:
4365 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4366 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4367 case X86_SEL_TYPE_SYS_UNDEFINED2:
4368 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4369 case X86_SEL_TYPE_SYS_UNDEFINED3:
4370 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4371 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4372 case X86_SEL_TYPE_SYS_UNDEFINED4:
4373 {
4374 /** @todo check what actually happens when the type is wrong...
4375 * esp. call gates. */
4376 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4377 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4378 }
4379
4380 case X86_SEL_TYPE_SYS_286_INT_GATE:
4381 f32BitGate = false;
4382 case X86_SEL_TYPE_SYS_386_INT_GATE:
4383 fEflToClear |= X86_EFL_IF;
4384 break;
4385
4386 case X86_SEL_TYPE_SYS_TASK_GATE:
4387 fTaskGate = true;
4388#ifndef IEM_IMPLEMENTS_TASKSWITCH
4389 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4390#endif
4391 break;
4392
4393 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4394 f32BitGate = false;
4395 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4396 break;
4397
4398 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4399 }
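    /* Summary of the gate types accepted above (standard system-descriptor
     * type encodings): 0x5 task gate -> task switch via the referenced TSS;
     * 0x6/0xE 286/386 interrupt gates -> IF is cleared on delivery;
     * 0x7/0xF 286/386 trap gates -> IF is left alone.  TF, NT, RF and VM are
     * cleared for all of them; every other type gets #GP(IDT|vector) here. */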
4400
4401 /* Check DPL against CPL if applicable. */
4402 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4403 {
4404 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4405 {
4406 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4407 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4408 }
4409 }
4410
4411 /* Is it there? */
4412 if (!Idte.Gate.u1Present)
4413 {
4414 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4415 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4416 }
4417
4418 /* Is it a task-gate? */
4419 if (fTaskGate)
4420 {
4421 /*
4422 * Construct the error code masks based on what caused this task switch.
4423 * See Intel Instruction reference for INT.
4424 */
4425 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4426 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4427 RTSEL SelTSS = Idte.Gate.u16Sel;
4428
4429 /*
4430 * Fetch the TSS descriptor in the GDT.
4431 */
4432 IEMSELDESC DescTSS;
4433 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4434 if (rcStrict != VINF_SUCCESS)
4435 {
4436 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4437 VBOXSTRICTRC_VAL(rcStrict)));
4438 return rcStrict;
4439 }
4440
4441 /* The TSS descriptor must be a system segment and be available (not busy). */
4442 if ( DescTSS.Legacy.Gen.u1DescType
4443 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4444 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4445 {
4446 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4447 u8Vector, SelTSS, DescTSS.Legacy.au64));
4448 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4449 }
4450
4451 /* The TSS must be present. */
4452 if (!DescTSS.Legacy.Gen.u1Present)
4453 {
4454 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4455 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4456 }
4457
4458 /* Do the actual task switch. */
4459 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4460 }
4461
4462 /* A null CS is bad. */
4463 RTSEL NewCS = Idte.Gate.u16Sel;
4464 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4465 {
4466 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4467 return iemRaiseGeneralProtectionFault0(pVCpu);
4468 }
4469
4470 /* Fetch the descriptor for the new CS. */
4471 IEMSELDESC DescCS;
4472 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4473 if (rcStrict != VINF_SUCCESS)
4474 {
4475 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4476 return rcStrict;
4477 }
4478
4479 /* Must be a code segment. */
4480 if (!DescCS.Legacy.Gen.u1DescType)
4481 {
4482 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4483 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4484 }
4485 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4486 {
4487 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4488 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4489 }
4490
4491 /* Don't allow lowering the privilege level. */
4492 /** @todo Does the lowering of privileges apply to software interrupts
4493 * only? This has bearings on the more-privileged or
4494 * same-privilege stack behavior further down. A testcase would
4495 * be nice. */
4496 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4497 {
4498 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4499 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4500 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 /* Make sure the selector is present. */
4504 if (!DescCS.Legacy.Gen.u1Present)
4505 {
4506 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4507 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4508 }
4509
4510 /* Check the new EIP against the new CS limit. */
4511 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4512 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4513 ? Idte.Gate.u16OffsetLow
4514 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4515 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4516 if (uNewEip > cbLimitCS)
4517 {
4518 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4519 u8Vector, uNewEip, cbLimitCS, NewCS));
4520 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4521 }
4522
4523 /* Calc the flag image to push. */
4524 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4525 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4526 fEfl &= ~X86_EFL_RF;
4527 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4528 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4529
4530 /* From V8086 mode only go to CPL 0. */
4531 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4532 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4533 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4534 {
4535 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4536 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4537 }
4538
4539 /*
4540 * If the privilege level changes, we need to get a new stack from the TSS.
4541 * This in turns means validating the new SS and ESP...
4542 */
4543 if (uNewCpl != pVCpu->iem.s.uCpl)
4544 {
4545 RTSEL NewSS;
4546 uint32_t uNewEsp;
4547 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4548 if (rcStrict != VINF_SUCCESS)
4549 return rcStrict;
4550
4551 IEMSELDESC DescSS;
4552 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4553 if (rcStrict != VINF_SUCCESS)
4554 return rcStrict;
4555
4556 /* Check that there is sufficient space for the stack frame. */
4557 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4558 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4559 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4560 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
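        /* For instance, a 32-bit gate (f32BitGate = 1) with an error code and
         * no V86 transition needs (12 << 1) = 24 bytes: uErr, EIP, CS, EFLAGS,
         * ESP and SS as six dwords; the V86 case adds ES, DS, FS and GS for
         * (20 << 1) = 40 bytes.  The 16-bit gate variants are half the size. */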
4561
4562 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4563 {
4564 if ( uNewEsp - 1 > cbLimitSS
4565 || uNewEsp < cbStackFrame)
4566 {
4567 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4568 u8Vector, NewSS, uNewEsp, cbStackFrame));
4569 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4570 }
4571 }
4572 else
4573 {
4574 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4575 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4576 {
4577 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4578 u8Vector, NewSS, uNewEsp, cbStackFrame));
4579 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4580 }
4581 }
4582
4583 /*
4584 * Start making changes.
4585 */
4586
4587 /* Set the new CPL so that stack accesses use it. */
4588 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4589 pVCpu->iem.s.uCpl = uNewCpl;
4590
4591 /* Create the stack frame. */
4592 RTPTRUNION uStackFrame;
4593 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4594 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4595 if (rcStrict != VINF_SUCCESS)
4596 return rcStrict;
4597 void * const pvStackFrame = uStackFrame.pv;
4598 if (f32BitGate)
4599 {
4600 if (fFlags & IEM_XCPT_FLAGS_ERR)
4601 *uStackFrame.pu32++ = uErr;
4602 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4603 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4604 uStackFrame.pu32[2] = fEfl;
4605 uStackFrame.pu32[3] = pCtx->esp;
4606 uStackFrame.pu32[4] = pCtx->ss.Sel;
4607 if (fEfl & X86_EFL_VM)
4608 {
4609 uStackFrame.pu32[1] = pCtx->cs.Sel;
4610 uStackFrame.pu32[5] = pCtx->es.Sel;
4611 uStackFrame.pu32[6] = pCtx->ds.Sel;
4612 uStackFrame.pu32[7] = pCtx->fs.Sel;
4613 uStackFrame.pu32[8] = pCtx->gs.Sel;
4614 }
4615 }
4616 else
4617 {
4618 if (fFlags & IEM_XCPT_FLAGS_ERR)
4619 *uStackFrame.pu16++ = uErr;
4620 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4621 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4622 uStackFrame.pu16[2] = fEfl;
4623 uStackFrame.pu16[3] = pCtx->sp;
4624 uStackFrame.pu16[4] = pCtx->ss.Sel;
4625 if (fEfl & X86_EFL_VM)
4626 {
4627 uStackFrame.pu16[1] = pCtx->cs.Sel;
4628 uStackFrame.pu16[5] = pCtx->es.Sel;
4629 uStackFrame.pu16[6] = pCtx->ds.Sel;
4630 uStackFrame.pu16[7] = pCtx->fs.Sel;
4631 uStackFrame.pu16[8] = pCtx->gs.Sel;
4632 }
4633 }
4634 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4635 if (rcStrict != VINF_SUCCESS)
4636 return rcStrict;
4637
4638 /* Mark the selectors 'accessed' (hope this is the correct time). */
4639 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4640 * after pushing the stack frame? (Write protect the gdt + stack to
4641 * find out.) */
4642 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4643 {
4644 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4645 if (rcStrict != VINF_SUCCESS)
4646 return rcStrict;
4647 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4648 }
4649
4650 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4651 {
4652 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4653 if (rcStrict != VINF_SUCCESS)
4654 return rcStrict;
4655 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4656 }
4657
4658 /*
4659 * Start committing the register changes (joins with the DPL=CPL branch).
4660 */
4661 pCtx->ss.Sel = NewSS;
4662 pCtx->ss.ValidSel = NewSS;
4663 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4664 pCtx->ss.u32Limit = cbLimitSS;
4665 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4666 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4667 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4668 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4669 * SP is loaded).
4670 * Need to check the other combinations too:
4671 * - 16-bit TSS, 32-bit handler
4672 * - 32-bit TSS, 16-bit handler */
4673 if (!pCtx->ss.Attr.n.u1DefBig)
4674 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4675 else
4676 pCtx->rsp = uNewEsp - cbStackFrame;
4677
4678 if (fEfl & X86_EFL_VM)
4679 {
4680 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4681 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4682 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4683 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4684 }
4685 }
4686 /*
4687 * Same privilege, no stack change and smaller stack frame.
4688 */
4689 else
4690 {
4691 uint64_t uNewRsp;
4692 RTPTRUNION uStackFrame;
4693 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4694 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4695 if (rcStrict != VINF_SUCCESS)
4696 return rcStrict;
4697 void * const pvStackFrame = uStackFrame.pv;
4698
4699 if (f32BitGate)
4700 {
4701 if (fFlags & IEM_XCPT_FLAGS_ERR)
4702 *uStackFrame.pu32++ = uErr;
4703 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4704 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4705 uStackFrame.pu32[2] = fEfl;
4706 }
4707 else
4708 {
4709 if (fFlags & IEM_XCPT_FLAGS_ERR)
4710 *uStackFrame.pu16++ = uErr;
4711 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4712 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4713 uStackFrame.pu16[2] = fEfl;
4714 }
4715 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4716 if (rcStrict != VINF_SUCCESS)
4717 return rcStrict;
4718
4719 /* Mark the CS selector as 'accessed'. */
4720 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4721 {
4722 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4723 if (rcStrict != VINF_SUCCESS)
4724 return rcStrict;
4725 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4726 }
4727
4728 /*
4729 * Start committing the register changes (joins with the other branch).
4730 */
4731 pCtx->rsp = uNewRsp;
4732 }
4733
4734 /* ... register committing continues. */
4735 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4736 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4737 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4738 pCtx->cs.u32Limit = cbLimitCS;
4739 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4740 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4741
4742 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4743 fEfl &= ~fEflToClear;
4744 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4745
4746 if (fFlags & IEM_XCPT_FLAGS_CR2)
4747 pCtx->cr2 = uCr2;
4748
4749 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4750 iemRaiseXcptAdjustState(pCtx, u8Vector);
4751
4752 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4753}
4754
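/*
 * Illustrative sketch only, not part of the build: the inter-privilege stack frame sizes
 * computed by the protected-mode worker above, spelled out without the '<< f32BitGate'
 * shift trick.  The helper name and its plain-bool parameters are hypothetical.
 */
#if 0
static unsigned iemExampleProtModeXcptFrameSize(bool f32BitGate, bool fErrCd, bool fV86)
{
    /* Slots pushed: [err] EIP CS EFLAGS ESP SS, plus ES DS FS GS when interrupting V8086 code. */
    unsigned const cSlots = fV86 ? (fErrCd ? 10 : 9) : (fErrCd ? 6 : 5);
    return cSlots * (f32BitGate ? 4 : 2);  /* 32-bit gates push dwords, 16-bit gates push words. */
}
#endif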
4755
4756/**
4757 * Implements exceptions and interrupts for long mode.
4758 *
4759 * @returns VBox strict status code.
4760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4761 * @param pCtx The CPU context.
4762 * @param cbInstr The number of bytes to offset rIP by in the return
4763 * address.
4764 * @param u8Vector The interrupt / exception vector number.
4765 * @param fFlags The flags.
4766 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4767 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4768 */
4769IEM_STATIC VBOXSTRICTRC
4770iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4771 PCPUMCTX pCtx,
4772 uint8_t cbInstr,
4773 uint8_t u8Vector,
4774 uint32_t fFlags,
4775 uint16_t uErr,
4776 uint64_t uCr2)
4777{
4778 /*
4779 * Read the IDT entry.
4780 */
4781 uint16_t offIdt = (uint16_t)u8Vector << 4;
4782 if (pCtx->idtr.cbIdt < offIdt + 7)
4783 {
4784 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4785 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4786 }
4787 X86DESC64 Idte;
4788 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4789 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4790 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4791 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4792 return rcStrict;
4793 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4794 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4795 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4796
4797 /*
4798 * Check the descriptor type, DPL and such.
4799 * ASSUMES this is done in the same order as described for call-gate calls.
4800 */
4801 if (Idte.Gate.u1DescType)
4802 {
4803 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4804 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4805 }
4806 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4807 switch (Idte.Gate.u4Type)
4808 {
4809 case AMD64_SEL_TYPE_SYS_INT_GATE:
4810 fEflToClear |= X86_EFL_IF;
4811 break;
4812 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4813 break;
4814
4815 default:
4816 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4817 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4818 }
4819
4820 /* Check DPL against CPL if applicable. */
4821 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4822 {
4823 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4824 {
4825 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4826 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4827 }
4828 }
4829
4830 /* Is it there? */
4831 if (!Idte.Gate.u1Present)
4832 {
4833 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4834 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4835 }
4836
4837 /* A null CS is bad. */
4838 RTSEL NewCS = Idte.Gate.u16Sel;
4839 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4840 {
4841 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4842 return iemRaiseGeneralProtectionFault0(pVCpu);
4843 }
4844
4845 /* Fetch the descriptor for the new CS. */
4846 IEMSELDESC DescCS;
4847 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4848 if (rcStrict != VINF_SUCCESS)
4849 {
4850 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4851 return rcStrict;
4852 }
4853
4854 /* Must be a 64-bit code segment. */
4855 if (!DescCS.Long.Gen.u1DescType)
4856 {
4857 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4858 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4859 }
4860 if ( !DescCS.Long.Gen.u1Long
4861 || DescCS.Long.Gen.u1DefBig
4862 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4863 {
4864 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4865 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4866 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4867 }
4868
4869 /* Don't allow lowering the privilege level. For non-conforming CS
4870 selectors, the CS.DPL sets the privilege level the trap/interrupt
4871 handler runs at. For conforming CS selectors, the CPL remains
4872 unchanged, but the CS.DPL must be <= CPL. */
4873 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4874 * when CPU in Ring-0. Result \#GP? */
4875 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4876 {
4877 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4878 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4879 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4880 }
4881
4882
4883 /* Make sure the selector is present. */
4884 if (!DescCS.Legacy.Gen.u1Present)
4885 {
4886 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4887 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4888 }
4889
4890 /* Check that the new RIP is canonical. */
4891 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4892 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4893 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4894 if (!IEM_IS_CANONICAL(uNewRip))
4895 {
4896 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4897 return iemRaiseGeneralProtectionFault0(pVCpu);
4898 }
4899
4900 /*
4901 * If the privilege level changes or if the IST isn't zero, we need to get
4902 * a new stack from the TSS.
4903 */
4904 uint64_t uNewRsp;
4905 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4906 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4907 if ( uNewCpl != pVCpu->iem.s.uCpl
4908 || Idte.Gate.u3IST != 0)
4909 {
4910 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4911 if (rcStrict != VINF_SUCCESS)
4912 return rcStrict;
4913 }
4914 else
4915 uNewRsp = pCtx->rsp;
4916 uNewRsp &= ~(uint64_t)0xf;
4917
4918 /*
4919 * Calc the flag image to push.
4920 */
4921 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4922 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4923 fEfl &= ~X86_EFL_RF;
4924 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4925 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4926
4927 /*
4928 * Start making changes.
4929 */
4930 /* Set the new CPL so that stack accesses use it. */
4931 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4932 pVCpu->iem.s.uCpl = uNewCpl;
4933
4934 /* Create the stack frame. */
4935 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4936 RTPTRUNION uStackFrame;
4937 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4938 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4939 if (rcStrict != VINF_SUCCESS)
4940 return rcStrict;
4941 void * const pvStackFrame = uStackFrame.pv;
4942
4943 if (fFlags & IEM_XCPT_FLAGS_ERR)
4944 *uStackFrame.pu64++ = uErr;
4945 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4946 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4947 uStackFrame.pu64[2] = fEfl;
4948 uStackFrame.pu64[3] = pCtx->rsp;
4949 uStackFrame.pu64[4] = pCtx->ss.Sel;
4950 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4951 if (rcStrict != VINF_SUCCESS)
4952 return rcStrict;
4953
4954 /* Mark the CS selector 'accessed' (hope this is the correct time). */
4955 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4956 * after pushing the stack frame? (Write protect the gdt + stack to
4957 * find out.) */
4958 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4959 {
4960 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4961 if (rcStrict != VINF_SUCCESS)
4962 return rcStrict;
4963 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4964 }
4965
4966 /*
4967 * Start committing the register changes.
4968 */
4969 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4970 * hidden registers when interrupting 32-bit or 16-bit code! */
4971 if (uNewCpl != uOldCpl)
4972 {
4973 pCtx->ss.Sel = 0 | uNewCpl;
4974 pCtx->ss.ValidSel = 0 | uNewCpl;
4975 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4976 pCtx->ss.u32Limit = UINT32_MAX;
4977 pCtx->ss.u64Base = 0;
4978 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4979 }
4980 pCtx->rsp = uNewRsp - cbStackFrame;
4981 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4982 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4983 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4984 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4985 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4986 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4987 pCtx->rip = uNewRip;
4988
4989 fEfl &= ~fEflToClear;
4990 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4991
4992 if (fFlags & IEM_XCPT_FLAGS_CR2)
4993 pCtx->cr2 = uCr2;
4994
4995 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4996 iemRaiseXcptAdjustState(pCtx, u8Vector);
4997
4998 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4999}
5000
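/*
 * Illustrative sketch only, not part of the build: the 64-bit worker above first aligns
 * the new stack down to 16 bytes, then pushes SS, RSP, RFLAGS, CS, RIP and, when
 * applicable, the error code as qwords.  The helper name is hypothetical.
 */
#if 0
static uint64_t iemExampleLongModeXcptFrameTop(uint64_t uRspFromTssOrCurrent, bool fErrCd)
{
    uint64_t const uRspAligned = uRspFromTssOrCurrent & ~(uint64_t)0xf;
    return uRspAligned - sizeof(uint64_t) * (5 + (fErrCd ? 1 : 0)); /* value that ends up in RSP */
}
#endif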
5001
5002/**
5003 * Implements exceptions and interrupts.
5004 *
5005 * All exceptions and interrupts go through this function!
5006 *
5007 * @returns VBox strict status code.
5008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5009 * @param cbInstr The number of bytes to offset rIP by in the return
5010 * address.
5011 * @param u8Vector The interrupt / exception vector number.
5012 * @param fFlags The flags.
5013 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5014 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5015 */
5016DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5017iemRaiseXcptOrInt(PVMCPU pVCpu,
5018 uint8_t cbInstr,
5019 uint8_t u8Vector,
5020 uint32_t fFlags,
5021 uint16_t uErr,
5022 uint64_t uCr2)
5023{
5024 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5025#ifdef IN_RING0
5026 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5027 AssertRCReturn(rc, rc);
5028#endif
5029
5030#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5031 /*
5032 * Flush prefetch buffer
5033 */
5034 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5035#endif
5036
5037 /*
5038 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5039 */
5040 if ( pCtx->eflags.Bits.u1VM
5041 && pCtx->eflags.Bits.u2IOPL != 3
5042 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5043 && (pCtx->cr0 & X86_CR0_PE) )
5044 {
5045 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5046 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5047 u8Vector = X86_XCPT_GP;
5048 uErr = 0;
5049 }
5050#ifdef DBGFTRACE_ENABLED
5051 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5052 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5053 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5054#endif
5055
5056 /*
5057 * Do recursion accounting.
5058 */
5059 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5060 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5061 if (pVCpu->iem.s.cXcptRecursions == 0)
5062 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5063 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5064 else
5065 {
5066 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5067 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5068
5069 /** @todo double and triple faults. */
5070 if (pVCpu->iem.s.cXcptRecursions >= 3)
5071 {
5072#ifdef DEBUG_bird
5073 AssertFailed();
5074#endif
5075 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5076 }
5077
5078 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5079 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5080 {
5081 ....
5082 } */
5083 }
5084 pVCpu->iem.s.cXcptRecursions++;
5085 pVCpu->iem.s.uCurXcpt = u8Vector;
5086 pVCpu->iem.s.fCurXcpt = fFlags;
5087
5088 /*
5089 * Extensive logging.
5090 */
5091#if defined(LOG_ENABLED) && defined(IN_RING3)
5092 if (LogIs3Enabled())
5093 {
5094 PVM pVM = pVCpu->CTX_SUFF(pVM);
5095 char szRegs[4096];
5096 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5097 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5098 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5099 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5100 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5101 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5102 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5103 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5104 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5105 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5106 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5107 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5108 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5109 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5110 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5111 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5112 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5113 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5114 " efer=%016VR{efer}\n"
5115 " pat=%016VR{pat}\n"
5116 " sf_mask=%016VR{sf_mask}\n"
5117 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5118 " lstar=%016VR{lstar}\n"
5119 " star=%016VR{star} cstar=%016VR{cstar}\n"
5120 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5121 );
5122
5123 char szInstr[256];
5124 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5125 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5126 szInstr, sizeof(szInstr), NULL);
5127 Log3(("%s%s\n", szRegs, szInstr));
5128 }
5129#endif /* LOG_ENABLED */
5130
5131 /*
5132 * Call the mode specific worker function.
5133 */
5134 VBOXSTRICTRC rcStrict;
5135 if (!(pCtx->cr0 & X86_CR0_PE))
5136 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5137 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5138 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5139 else
5140 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5141
5142 /* Flush the prefetch buffer. */
5143#ifdef IEM_WITH_CODE_TLB
5144 pVCpu->iem.s.pbInstrBuf = NULL;
5145#else
5146 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5147#endif
5148
5149 /*
5150 * Unwind.
5151 */
5152 pVCpu->iem.s.cXcptRecursions--;
5153 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5154 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5155 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5156 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5157 return rcStrict;
5158}
5159
5160#ifdef IEM_WITH_SETJMP
5161/**
5162 * See iemRaiseXcptOrInt. Will not return.
5163 */
5164IEM_STATIC DECL_NO_RETURN(void)
5165iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5166 uint8_t cbInstr,
5167 uint8_t u8Vector,
5168 uint32_t fFlags,
5169 uint16_t uErr,
5170 uint64_t uCr2)
5171{
5172 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5173 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5174}
5175#endif
5176
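/*
 * Illustrative sketch only, not part of the build: instruction implementations normally
 * go through the convenience raisers defined below rather than calling iemRaiseXcptOrInt
 * directly.  The helper name is hypothetical; iemRaiseGeneralProtectionFault0 is real.
 */
#if 0
static VBOXSTRICTRC iemExampleRaiseGpIfNotRing0(PVMCPU pVCpu)
{
    if (pVCpu->iem.s.uCpl != 0)
        return iemRaiseGeneralProtectionFault0(pVCpu); /* -> iemRaiseXcptOrInt(..., X86_XCPT_GP, ...) */
    return VINF_SUCCESS;
}
#endif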
5177
5178/** \#DE - 00. */
5179DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5180{
5181 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5182}
5183
5184
5185/** \#DB - 01.
5186 * @note This automatically clears DR7.GD. */
5187DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5188{
5189 /** @todo set/clear RF. */
5190 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5191 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5192}
5193
5194
5195/** \#UD - 06. */
5196DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5197{
5198 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5199}
5200
5201
5202/** \#NM - 07. */
5203DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5204{
5205 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5206}
5207
5208
5209/** \#TS(err) - 0a. */
5210DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5211{
5212 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5213}
5214
5215
5216/** \#TS(tr) - 0a. */
5217DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5218{
5219 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5220 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5221}
5222
5223
5224/** \#TS(0) - 0a. */
5225DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5226{
5227 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5228 0, 0);
5229}
5230
5231
5232/** \#TS(err) - 0a. */
5233DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5234{
5235 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5236 uSel & X86_SEL_MASK_OFF_RPL, 0);
5237}
5238
5239
5240/** \#NP(err) - 0b. */
5241DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5242{
5243 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5244}
5245
5246
5247/** \#NP(seg) - 0b. */
5248DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5249{
5250 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5251 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5252}
5253
5254
5255/** \#NP(sel) - 0b. */
5256DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5257{
5258 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5259 uSel & ~X86_SEL_RPL, 0);
5260}
5261
5262
5263/** \#SS(sel) - 0c. */
5264DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5265{
5266 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5267 uSel & ~X86_SEL_RPL, 0);
5268}
5269
5270
5271/** \#SS(err) - 0c. */
5272DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5273{
5274 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5275}
5276
5277
5278/** \#GP(n) - 0d. */
5279DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5280{
5281 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5282}
5283
5284
5285/** \#GP(0) - 0d. */
5286DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5287{
5288 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5289}
5290
5291#ifdef IEM_WITH_SETJMP
5292/** \#GP(0) - 0d. */
5293DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5294{
5295 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5296}
5297#endif
5298
5299
5300/** \#GP(sel) - 0d. */
5301DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5302{
5303 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5304 Sel & ~X86_SEL_RPL, 0);
5305}
5306
5307
5308/** \#GP(0) - 0d. */
5309DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5310{
5311 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5312}
5313
5314
5315/** \#GP(sel) - 0d. */
5316DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5317{
5318 NOREF(iSegReg); NOREF(fAccess);
5319 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5320 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5321}
5322
5323#ifdef IEM_WITH_SETJMP
5324/** \#GP(sel) - 0d, longjmp. */
5325DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5326{
5327 NOREF(iSegReg); NOREF(fAccess);
5328 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5329 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5330}
5331#endif
5332
5333/** \#GP(sel) - 0d. */
5334DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5335{
5336 NOREF(Sel);
5337 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5338}
5339
5340#ifdef IEM_WITH_SETJMP
5341/** \#GP(sel) - 0d, longjmp. */
5342DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5343{
5344 NOREF(Sel);
5345 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5346}
5347#endif
5348
5349
5350/** \#GP(sel) - 0d. */
5351DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5352{
5353 NOREF(iSegReg); NOREF(fAccess);
5354 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5355}
5356
5357#ifdef IEM_WITH_SETJMP
5358/** \#GP(sel) - 0d, longjmp. */
5359DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5360 uint32_t fAccess)
5361{
5362 NOREF(iSegReg); NOREF(fAccess);
5363 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5364}
5365#endif
5366
5367
5368/** \#PF(n) - 0e. */
5369DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5370{
5371 uint16_t uErr;
5372 switch (rc)
5373 {
5374 case VERR_PAGE_NOT_PRESENT:
5375 case VERR_PAGE_TABLE_NOT_PRESENT:
5376 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5377 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5378 uErr = 0;
5379 break;
5380
5381 default:
5382 AssertMsgFailed(("%Rrc\n", rc));
5383 case VERR_ACCESS_DENIED:
5384 uErr = X86_TRAP_PF_P;
5385 break;
5386
5387 /** @todo reserved */
5388 }
5389
5390 if (pVCpu->iem.s.uCpl == 3)
5391 uErr |= X86_TRAP_PF_US;
5392
5393 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5394 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5395 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5396 uErr |= X86_TRAP_PF_ID;
5397
5398#if 0 /* This is so much nonsense, really. Why was it done like that? */
5399 /* Note! RW access callers reporting a WRITE protection fault will clear
5400 the READ flag before calling. So, read-modify-write accesses (RW)
5401 can safely be reported as READ faults. */
5402 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5403 uErr |= X86_TRAP_PF_RW;
5404#else
5405 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5406 {
5407 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5408 uErr |= X86_TRAP_PF_RW;
5409 }
5410#endif
5411
5412 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5413 uErr, GCPtrWhere);
5414}
5415
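/*
 * Illustrative sketch only, not part of the build: the \#PF error code assembled above for
 * a common case.  The helper name is hypothetical; the X86_TRAP_PF_* bits are the ones used
 * by iemRaisePageFault (X86_TRAP_PF_ID is additionally set for code fetches with NX active).
 */
#if 0
static uint16_t iemExamplePfErrUserWriteToPresentPage(void)
{
    return X86_TRAP_PF_P | X86_TRAP_PF_RW | X86_TRAP_PF_US;
}
#endif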
5416#ifdef IEM_WITH_SETJMP
5417/** \#PF(n) - 0e, longjmp. */
5418IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5419{
5420 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5421}
5422#endif
5423
5424
5425/** \#MF(0) - 10. */
5426DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5427{
5428 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5429}
5430
5431
5432/** \#AC(0) - 11. */
5433DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5434{
5435 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5436}
5437
5438
5439/**
5440 * Macro for calling iemCImplRaiseDivideError().
5441 *
5442 * This enables us to add/remove arguments and force different levels of
5443 * inlining as we wish.
5444 *
5445 * @return Strict VBox status code.
5446 */
5447#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5448IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5449{
5450 NOREF(cbInstr);
5451 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5452}
5453
5454
5455/**
5456 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5457 *
5458 * This enables us to add/remove arguments and force different levels of
5459 * inlining as we wish.
5460 *
5461 * @return Strict VBox status code.
5462 */
5463#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5464IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5465{
5466 NOREF(cbInstr);
5467 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5468}
5469
5470
5471/**
5472 * Macro for calling iemCImplRaiseInvalidOpcode().
5473 *
5474 * This enables us to add/remove arguments and force different levels of
5475 * inlining as we wish.
5476 *
5477 * @return Strict VBox status code.
5478 */
5479#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5480IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5481{
5482 NOREF(cbInstr);
5483 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5484}
5485
5486
5487/** @} */
5488
5489
5490/*
5491 *
5492 * Helper routines.
5493 * Helper routines.
5494 * Helper routines.
5495 *
5496 */
5497
5498/**
5499 * Recalculates the effective operand size.
5500 *
5501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5502 */
5503IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5504{
5505 switch (pVCpu->iem.s.enmCpuMode)
5506 {
5507 case IEMMODE_16BIT:
5508 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5509 break;
5510 case IEMMODE_32BIT:
5511 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5512 break;
5513 case IEMMODE_64BIT:
5514 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5515 {
5516 case 0:
5517 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5518 break;
5519 case IEM_OP_PRF_SIZE_OP:
5520 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5521 break;
5522 case IEM_OP_PRF_SIZE_REX_W:
5523 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5524 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5525 break;
5526 }
5527 break;
5528 default:
5529 AssertFailed();
5530 }
5531}
5532
5533
5534/**
5535 * Sets the default operand size to 64-bit and recalculates the effective
5536 * operand size.
5537 *
5538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5539 */
5540IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5541{
5542 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5543 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5544 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5545 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5546 else
5547 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5548}
5549
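/*
 * Illustrative sketch only, not part of the build: the 64-bit mode operand size rules
 * implemented by the two helpers above, written as one function.  The helper name is
 * hypothetical.
 */
#if 0
static IEMMODE iemExampleEffOpSize64(uint32_t fPrefixes, IEMMODE enmDefOpSize)
{
    if (fPrefixes & IEM_OP_PRF_SIZE_REX_W)
        return IEMMODE_64BIT;   /* REX.W always wins, even when 0x66 is also present. */
    if (fPrefixes & IEM_OP_PRF_SIZE_OP)
        return IEMMODE_16BIT;   /* 0x66 alone selects 16-bit. */
    return enmDefOpSize;        /* otherwise the default (32-bit, or 64-bit where iemRecalEffOpSize64Default applies). */
}
#endif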
5550
5551/*
5552 *
5553 * Common opcode decoders.
5554 * Common opcode decoders.
5555 * Common opcode decoders.
5556 *
5557 */
5558//#include <iprt/mem.h>
5559
5560/**
5561 * Used to add extra details about a stub case.
5562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5563 */
5564IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5565{
5566#if defined(LOG_ENABLED) && defined(IN_RING3)
5567 PVM pVM = pVCpu->CTX_SUFF(pVM);
5568 char szRegs[4096];
5569 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5570 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5571 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5572 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5573 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5574 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5575 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5576 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5577 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5578 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5579 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5580 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5581 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5582 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5583 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5584 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5585 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5586 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5587 " efer=%016VR{efer}\n"
5588 " pat=%016VR{pat}\n"
5589 " sf_mask=%016VR{sf_mask}\n"
5590 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5591 " lstar=%016VR{lstar}\n"
5592 " star=%016VR{star} cstar=%016VR{cstar}\n"
5593 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5594 );
5595
5596 char szInstr[256];
5597 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5598 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5599 szInstr, sizeof(szInstr), NULL);
5600
5601 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5602#else
5603 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5604#endif
5605}
5606
5607/**
5608 * Complains about a stub.
5609 *
5610 * Two versions of this macro are provided: one for daily use and one for use when
5611 * working on IEM.
5612 */
5613#if 0
5614# define IEMOP_BITCH_ABOUT_STUB() \
5615 do { \
5616 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5617 iemOpStubMsg2(pVCpu); \
5618 RTAssertPanic(); \
5619 } while (0)
5620#else
5621# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5622#endif
5623
5624/** Stubs an opcode. */
5625#define FNIEMOP_STUB(a_Name) \
5626 FNIEMOP_DEF(a_Name) \
5627 { \
5628 RT_NOREF_PV(pVCpu); \
5629 IEMOP_BITCH_ABOUT_STUB(); \
5630 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5631 } \
5632 typedef int ignore_semicolon
5633
5634/** Stubs an opcode. */
5635#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5636 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5637 { \
5638 RT_NOREF_PV(pVCpu); \
5639 RT_NOREF_PV(a_Name0); \
5640 IEMOP_BITCH_ABOUT_STUB(); \
5641 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5642 } \
5643 typedef int ignore_semicolon
5644
5645/** Stubs an opcode which currently should raise \#UD. */
5646#define FNIEMOP_UD_STUB(a_Name) \
5647 FNIEMOP_DEF(a_Name) \
5648 { \
5649 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5650 return IEMOP_RAISE_INVALID_OPCODE(); \
5651 } \
5652 typedef int ignore_semicolon
5653
5654/** Stubs an opcode which currently should raise \#UD. */
5655#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5656 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5657 { \
5658 RT_NOREF_PV(pVCpu); \
5659 RT_NOREF_PV(a_Name0); \
5660 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5661 return IEMOP_RAISE_INVALID_OPCODE(); \
5662 } \
5663 typedef int ignore_semicolon
5664
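/*
 * Illustrative sketch only, not part of the build: how a not-yet-implemented opcode is
 * typically declared using the stub macros above.  The opcode name is hypothetical.
 */
#if 0
FNIEMOP_STUB(iemOp_ExampleNotYetImplemented);
#endif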
5665
5666
5667/** @name Register Access.
5668 * @{
5669 */
5670
5671/**
5672 * Gets a reference (pointer) to the specified hidden segment register.
5673 *
5674 * @returns Hidden register reference.
5675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5676 * @param iSegReg The segment register.
5677 */
5678IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5679{
5680 Assert(iSegReg < X86_SREG_COUNT);
5681 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5682 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5683
5684#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5685 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5686 { /* likely */ }
5687 else
5688 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5689#else
5690 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5691#endif
5692 return pSReg;
5693}
5694
5695
5696/**
5697 * Ensures that the given hidden segment register is up to date.
5698 *
5699 * @returns Hidden register reference.
5700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5701 * @param pSReg The segment register.
5702 */
5703IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5704{
5705#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5706 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5707 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5708#else
5709 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5710 NOREF(pVCpu);
5711#endif
5712 return pSReg;
5713}
5714
5715
5716/**
5717 * Gets a reference (pointer) to the specified segment register (the selector
5718 * value).
5719 *
5720 * @returns Pointer to the selector variable.
5721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5722 * @param iSegReg The segment register.
5723 */
5724DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5725{
5726 Assert(iSegReg < X86_SREG_COUNT);
5727 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5728 return &pCtx->aSRegs[iSegReg].Sel;
5729}
5730
5731
5732/**
5733 * Fetches the selector value of a segment register.
5734 *
5735 * @returns The selector value.
5736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5737 * @param iSegReg The segment register.
5738 */
5739DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5740{
5741 Assert(iSegReg < X86_SREG_COUNT);
5742 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5743}
5744
5745
5746/**
5747 * Gets a reference (pointer) to the specified general purpose register.
5748 *
5749 * @returns Register reference.
5750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5751 * @param iReg The general purpose register.
5752 */
5753DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5754{
5755 Assert(iReg < 16);
5756 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5757 return &pCtx->aGRegs[iReg];
5758}
5759
5760
5761/**
5762 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5763 *
5764 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5765 *
5766 * @returns Register reference.
5767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5768 * @param iReg The register.
5769 */
5770DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5771{
5772 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5773 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5774 {
5775 Assert(iReg < 16);
5776 return &pCtx->aGRegs[iReg].u8;
5777 }
5778 /* high 8-bit register. */
5779 Assert(iReg < 8);
5780 return &pCtx->aGRegs[iReg & 3].bHi;
5781}
5782
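/*
 * Illustrative sketch only, not part of the build: without a REX prefix, 8-bit register
 * indices 4..7 select AH/CH/DH/BH, i.e. bits 15:8 of RAX/RCX/RDX/RBX, which is why
 * iemGRegRefU8 cannot simply index aGRegs.  The helper name is hypothetical.
 */
#if 0
static uint8_t iemExampleFetchAh(PVMCPU pVCpu)
{
    return *iemGRegRefU8(pVCpu, 4 /* AH, assuming IEM_OP_PRF_REX is not active */);
}
#endif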
5783
5784/**
5785 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5786 *
5787 * @returns Register reference.
5788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5789 * @param iReg The register.
5790 */
5791DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5792{
5793 Assert(iReg < 16);
5794 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5795 return &pCtx->aGRegs[iReg].u16;
5796}
5797
5798
5799/**
5800 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5801 *
5802 * @returns Register reference.
5803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5804 * @param iReg The register.
5805 */
5806DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5807{
5808 Assert(iReg < 16);
5809 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5810 return &pCtx->aGRegs[iReg].u32;
5811}
5812
5813
5814/**
5815 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5816 *
5817 * @returns Register reference.
5818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5819 * @param iReg The register.
5820 */
5821DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5822{
5823 Assert(iReg < 16);
5824 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5825 return &pCtx->aGRegs[iReg].u64;
5826}
5827
5828
5829/**
5830 * Fetches the value of an 8-bit general purpose register.
5831 *
5832 * @returns The register value.
5833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5834 * @param iReg The register.
5835 */
5836DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5837{
5838 return *iemGRegRefU8(pVCpu, iReg);
5839}
5840
5841
5842/**
5843 * Fetches the value of a 16-bit general purpose register.
5844 *
5845 * @returns The register value.
5846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5847 * @param iReg The register.
5848 */
5849DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5850{
5851 Assert(iReg < 16);
5852 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5853}
5854
5855
5856/**
5857 * Fetches the value of a 32-bit general purpose register.
5858 *
5859 * @returns The register value.
5860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5861 * @param iReg The register.
5862 */
5863DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5864{
5865 Assert(iReg < 16);
5866 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5867}
5868
5869
5870/**
5871 * Fetches the value of a 64-bit general purpose register.
5872 *
5873 * @returns The register value.
5874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5875 * @param iReg The register.
5876 */
5877DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5878{
5879 Assert(iReg < 16);
5880 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5881}
5882
5883
5884/**
5885 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5886 *
5887 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5888 * segment limit.
5889 *
5890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5891 * @param offNextInstr The offset of the next instruction.
5892 */
5893IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5894{
5895 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5896 switch (pVCpu->iem.s.enmEffOpSize)
5897 {
5898 case IEMMODE_16BIT:
5899 {
5900 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5901 if ( uNewIp > pCtx->cs.u32Limit
5902 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5903 return iemRaiseGeneralProtectionFault0(pVCpu);
5904 pCtx->rip = uNewIp;
5905 break;
5906 }
5907
5908 case IEMMODE_32BIT:
5909 {
5910 Assert(pCtx->rip <= UINT32_MAX);
5911 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5912
5913 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5914 if (uNewEip > pCtx->cs.u32Limit)
5915 return iemRaiseGeneralProtectionFault0(pVCpu);
5916 pCtx->rip = uNewEip;
5917 break;
5918 }
5919
5920 case IEMMODE_64BIT:
5921 {
5922 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5923
5924 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5925 if (!IEM_IS_CANONICAL(uNewRip))
5926 return iemRaiseGeneralProtectionFault0(pVCpu);
5927 pCtx->rip = uNewRip;
5928 break;
5929 }
5930
5931 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5932 }
5933
5934 pCtx->eflags.Bits.u1RF = 0;
5935
5936#ifndef IEM_WITH_CODE_TLB
5937 /* Flush the prefetch buffer. */
5938 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5939#endif
5940
5941 return VINF_SUCCESS;
5942}
5943
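/*
 * Illustrative sketch only, not part of the build: with a 16-bit effective operand size
 * the new IP computed by the helper above wraps modulo 64K before the CS limit check.
 * The helper name and the values are hypothetical.
 */
#if 0
static void iemExampleRelJmpWrap16(void)
{
    uint16_t const uIp          = UINT16_C(0xfffe);
    uint8_t  const cbInstr      = 2;    /* e.g. a short JMP */
    int8_t   const offNextInstr = 0;
    uint16_t const uNewIp       = (uint16_t)(uIp + cbInstr + offNextInstr); /* wraps to 0x0000 */
    NOREF(uNewIp);
}
#endif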
5944
5945/**
5946 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5947 *
5948 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5949 * segment limit.
5950 *
5951 * @returns Strict VBox status code.
5952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5953 * @param offNextInstr The offset of the next instruction.
5954 */
5955IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5956{
5957 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5958 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5959
5960 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5961 if ( uNewIp > pCtx->cs.u32Limit
5962 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5963 return iemRaiseGeneralProtectionFault0(pVCpu);
5964 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5965 pCtx->rip = uNewIp;
5966 pCtx->eflags.Bits.u1RF = 0;
5967
5968#ifndef IEM_WITH_CODE_TLB
5969 /* Flush the prefetch buffer. */
5970 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5971#endif
5972
5973 return VINF_SUCCESS;
5974}
5975
5976
5977/**
5978 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5979 *
5980 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5981 * segment limit.
5982 *
5983 * @returns Strict VBox status code.
5984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5985 * @param offNextInstr The offset of the next instruction.
5986 */
5987IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
5988{
5989 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5990 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
5991
5992 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
5993 {
5994 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5995
5996 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5997 if (uNewEip > pCtx->cs.u32Limit)
5998 return iemRaiseGeneralProtectionFault0(pVCpu);
5999 pCtx->rip = uNewEip;
6000 }
6001 else
6002 {
6003 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6004
6005 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6006 if (!IEM_IS_CANONICAL(uNewRip))
6007 return iemRaiseGeneralProtectionFault0(pVCpu);
6008 pCtx->rip = uNewRip;
6009 }
6010 pCtx->eflags.Bits.u1RF = 0;
6011
6012#ifndef IEM_WITH_CODE_TLB
6013 /* Flush the prefetch buffer. */
6014 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6015#endif
6016
6017 return VINF_SUCCESS;
6018}
6019
6020
6021/**
6022 * Performs a near jump to the specified address.
6023 *
6024 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6025 * segment limit.
6026 *
6027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6028 * @param uNewRip The new RIP value.
6029 */
6030IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6031{
6032 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6033 switch (pVCpu->iem.s.enmEffOpSize)
6034 {
6035 case IEMMODE_16BIT:
6036 {
6037 Assert(uNewRip <= UINT16_MAX);
6038 if ( uNewRip > pCtx->cs.u32Limit
6039 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6040 return iemRaiseGeneralProtectionFault0(pVCpu);
6041 /** @todo Test 16-bit jump in 64-bit mode. */
6042 pCtx->rip = uNewRip;
6043 break;
6044 }
6045
6046 case IEMMODE_32BIT:
6047 {
6048 Assert(uNewRip <= UINT32_MAX);
6049 Assert(pCtx->rip <= UINT32_MAX);
6050 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6051
6052 if (uNewRip > pCtx->cs.u32Limit)
6053 return iemRaiseGeneralProtectionFault0(pVCpu);
6054 pCtx->rip = uNewRip;
6055 break;
6056 }
6057
6058 case IEMMODE_64BIT:
6059 {
6060 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6061
6062 if (!IEM_IS_CANONICAL(uNewRip))
6063 return iemRaiseGeneralProtectionFault0(pVCpu);
6064 pCtx->rip = uNewRip;
6065 break;
6066 }
6067
6068 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6069 }
6070
6071 pCtx->eflags.Bits.u1RF = 0;
6072
6073#ifndef IEM_WITH_CODE_TLB
6074 /* Flush the prefetch buffer. */
6075 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6076#endif
6077
6078 return VINF_SUCCESS;
6079}
6080
6081
6082/**
6083 * Gets the address of the top of the stack.
6084 *
6085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6086 * @param pCtx The CPU context from which SP/ESP/RSP should be
6087 * read.
6088 */
6089DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6090{
6091 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6092 return pCtx->rsp;
6093 if (pCtx->ss.Attr.n.u1DefBig)
6094 return pCtx->esp;
6095 return pCtx->sp;
6096}
6097
6098
6099/**
6100 * Updates the RIP/EIP/IP to point to the next instruction.
6101 *
6102 * This function leaves the EFLAGS.RF flag alone.
6103 *
6104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6105 * @param cbInstr The number of bytes to add.
6106 */
6107IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6108{
6109 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6110 switch (pVCpu->iem.s.enmCpuMode)
6111 {
6112 case IEMMODE_16BIT:
6113 Assert(pCtx->rip <= UINT16_MAX);
6114 pCtx->eip += cbInstr;
6115 pCtx->eip &= UINT32_C(0xffff);
6116 break;
6117
6118 case IEMMODE_32BIT:
6119 pCtx->eip += cbInstr;
6120 Assert(pCtx->rip <= UINT32_MAX);
6121 break;
6122
6123 case IEMMODE_64BIT:
6124 pCtx->rip += cbInstr;
6125 break;
6126 default: AssertFailed();
6127 }
6128}
6129
6130
6131#if 0
6132/**
6133 * Updates the RIP/EIP/IP to point to the next instruction.
6134 *
6135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6136 */
6137IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6138{
6139 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6140}
6141#endif
6142
6143
6144
6145/**
6146 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6147 *
6148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6149 * @param cbInstr The number of bytes to add.
6150 */
6151IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6152{
6153 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6154
6155 pCtx->eflags.Bits.u1RF = 0;
6156
6157 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6158#if ARCH_BITS >= 64
6159 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6160 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6161 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6162#else
6163 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6164 pCtx->rip += cbInstr;
6165 else
6166 {
6167 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6168 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6169 }
6170#endif
6171}
6172
6173
6174/**
6175 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6176 *
6177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6178 */
6179IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6180{
6181 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6182}
6183
6184
6185/**
6186 * Adds to the stack pointer.
6187 *
6188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6189 * @param pCtx The CPU context in which SP/ESP/RSP should be
6190 * updated.
6191 * @param cbToAdd The number of bytes to add (8-bit!).
6192 */
6193DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6194{
6195 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6196 pCtx->rsp += cbToAdd;
6197 else if (pCtx->ss.Attr.n.u1DefBig)
6198 pCtx->esp += cbToAdd;
6199 else
6200 pCtx->sp += cbToAdd;
6201}
6202
6203
6204/**
6205 * Subtracts from the stack pointer.
6206 *
6207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6208 * @param pCtx The CPU context in which SP/ESP/RSP should be
6209 * updated.
6210 * @param cbToSub The number of bytes to subtract (8-bit!).
6211 */
6212DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6213{
6214 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6215 pCtx->rsp -= cbToSub;
6216 else if (pCtx->ss.Attr.n.u1DefBig)
6217 pCtx->esp -= cbToSub;
6218 else
6219 pCtx->sp -= cbToSub;
6220}
6221
6222
6223/**
6224 * Adds to the temporary stack pointer.
6225 *
6226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6227 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6228 * @param cbToAdd The number of bytes to add (16-bit).
6229 * @param pCtx Where to get the current stack mode.
6230 */
6231DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6232{
6233 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6234 pTmpRsp->u += cbToAdd;
6235 else if (pCtx->ss.Attr.n.u1DefBig)
6236 pTmpRsp->DWords.dw0 += cbToAdd;
6237 else
6238 pTmpRsp->Words.w0 += cbToAdd;
6239}
6240
6241
6242/**
6243 * Subtracts from the temporary stack pointer.
6244 *
6245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6246 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6247 * @param cbToSub The number of bytes to subtract.
6248 * @param pCtx Where to get the current stack mode.
6249 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6250 * expecting that.
6251 */
6252DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6253{
6254 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6255 pTmpRsp->u -= cbToSub;
6256 else if (pCtx->ss.Attr.n.u1DefBig)
6257 pTmpRsp->DWords.dw0 -= cbToSub;
6258 else
6259 pTmpRsp->Words.w0 -= cbToSub;
6260}
6261
6262
6263/**
6264 * Calculates the effective stack address for a push of the specified size as
6265 * well as the new RSP value (upper bits may be masked).
6266 *
6267 * @returns Effective stack address for the push.
6268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6269 * @param pCtx Where to get the current stack mode.
6270 * @param cbItem The size of the stack item to push.
6271 * @param puNewRsp Where to return the new RSP value.
6272 */
6273DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6274{
6275 RTUINT64U uTmpRsp;
6276 RTGCPTR GCPtrTop;
6277 uTmpRsp.u = pCtx->rsp;
6278
6279 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6280 GCPtrTop = uTmpRsp.u -= cbItem;
6281 else if (pCtx->ss.Attr.n.u1DefBig)
6282 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6283 else
6284 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6285 *puNewRsp = uTmpRsp.u;
6286 return GCPtrTop;
6287}
6288
6289
6290/**
6291 * Gets the current stack pointer and calculates the value after a pop of the
6292 * specified size.
6293 *
6294 * @returns Current stack pointer.
6295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6296 * @param pCtx Where to get the current stack mode.
6297 * @param cbItem The size of the stack item to pop.
6298 * @param puNewRsp Where to return the new RSP value.
6299 */
6300DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6301{
6302 RTUINT64U uTmpRsp;
6303 RTGCPTR GCPtrTop;
6304 uTmpRsp.u = pCtx->rsp;
6305
6306 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6307 {
6308 GCPtrTop = uTmpRsp.u;
6309 uTmpRsp.u += cbItem;
6310 }
6311 else if (pCtx->ss.Attr.n.u1DefBig)
6312 {
6313 GCPtrTop = uTmpRsp.DWords.dw0;
6314 uTmpRsp.DWords.dw0 += cbItem;
6315 }
6316 else
6317 {
6318 GCPtrTop = uTmpRsp.Words.w0;
6319 uTmpRsp.Words.w0 += cbItem;
6320 }
6321 *puNewRsp = uTmpRsp.u;
6322 return GCPtrTop;
6323}
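
/*
 * Typical calling pattern for the push/pop helpers above (sketch only): the
 * effective address is calculated first, the guest memory access is made,
 * and RSP is committed only once that access succeeded, so a faulting push
 * leaves RSP unchanged.  iemMemStoreDataU16 is assumed to be the 16-bit
 * store helper defined further down in this file.
 *
 *      uint16_t const u16Value = pCtx->ax;       // value to push, e.g. a 16-bit PUSH AX
 *      uint64_t       uNewRsp;
 *      RTGCPTR        GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, sizeof(uint16_t), &uNewRsp);
 *      VBOXSTRICTRC   rcStrict = iemMemStoreDataU16(pVCpu, X86_SREG_SS, GCPtrTop, u16Value);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                      // #SS, #PF, etc. - RSP not modified
 *      pCtx->rsp = uNewRsp;                      // commit the new stack pointer
 */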
6324
6325
6326/**
6327 * Calculates the effective stack address for a push of the specified size as
6328 * well as the new temporary RSP value (upper bits may be masked).
6329 *
6330 * @returns Effective stack address for the push.
6331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6332 * @param pCtx Where to get the current stack mode.
6333 * @param pTmpRsp The temporary stack pointer. This is updated.
6334 * @param cbItem The size of the stack item to push.
6335 */
6336DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6337{
6338 RTGCPTR GCPtrTop;
6339
6340 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6341 GCPtrTop = pTmpRsp->u -= cbItem;
6342 else if (pCtx->ss.Attr.n.u1DefBig)
6343 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6344 else
6345 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6346 return GCPtrTop;
6347}
6348
6349
6350/**
6351 * Gets the effective stack address for a pop of the specified size and
6352 * calculates and updates the temporary RSP.
6353 *
6354 * @returns Current stack pointer.
6355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6356 * @param pCtx Where to get the current stack mode.
6357 * @param pTmpRsp The temporary stack pointer. This is updated.
6358 * @param cbItem The size of the stack item to pop.
6359 */
6360DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6361{
6362 RTGCPTR GCPtrTop;
6363 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6364 {
6365 GCPtrTop = pTmpRsp->u;
6366 pTmpRsp->u += cbItem;
6367 }
6368 else if (pCtx->ss.Attr.n.u1DefBig)
6369 {
6370 GCPtrTop = pTmpRsp->DWords.dw0;
6371 pTmpRsp->DWords.dw0 += cbItem;
6372 }
6373 else
6374 {
6375 GCPtrTop = pTmpRsp->Words.w0;
6376 pTmpRsp->Words.w0 += cbItem;
6377 }
6378 return GCPtrTop;
6379}
6380
6381/** @} */
6382
6383
6384/** @name FPU access and helpers.
6385 *
6386 * @{
6387 */
6388
6389
6390/**
6391 * Hook for preparing to use the host FPU.
6392 *
6393 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6394 *
6395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6396 */
6397DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6398{
6399#ifdef IN_RING3
6400 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6401#else
6402 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6403#endif
6404}
6405
6406
6407/**
6408 * Hook for preparing to use the host FPU for SSE instructions.
6409 *
6410 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6411 *
6412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6413 */
6414DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6415{
6416 iemFpuPrepareUsage(pVCpu);
6417}
6418
6419
6420/**
6421 * Hook for actualizing the guest FPU state before the interpreter reads it.
6422 *
6423 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6424 *
6425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6426 */
6427DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6428{
6429#ifdef IN_RING3
6430 NOREF(pVCpu);
6431#else
6432 CPUMRZFpuStateActualizeForRead(pVCpu);
6433#endif
6434}
6435
6436
6437/**
6438 * Hook for actualizing the guest FPU state before the interpreter changes it.
6439 *
6440 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6441 *
6442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6443 */
6444DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6445{
6446#ifdef IN_RING3
6447 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6448#else
6449 CPUMRZFpuStateActualizeForChange(pVCpu);
6450#endif
6451}
6452
6453
6454/**
6455 * Hook for actualizing the guest XMM0..15 register state for read only.
6456 *
6457 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6458 *
6459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6460 */
6461DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6462{
6463#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6464 NOREF(pVCpu);
6465#else
6466 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6467#endif
6468}
6469
6470
6471/**
6472 * Hook for actualizing the guest XMM0..15 register state for read+write.
6473 *
6474 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6475 *
6476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6477 */
6478DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6479{
6480#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6481 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6482#else
6483 CPUMRZFpuStateActualizeForChange(pVCpu);
6484#endif
6485}
6486
6487
6488/**
6489 * Stores a QNaN value into a FPU register.
6490 *
6491 * @param pReg Pointer to the register.
6492 */
6493DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6494{
6495 pReg->au32[0] = UINT32_C(0x00000000);
6496 pReg->au32[1] = UINT32_C(0xc0000000);
6497 pReg->au16[4] = UINT16_C(0xffff);
6498}
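
/*
 * For reference, the value stored above is the x87 "real indefinite" QNaN
 * that is used for all masked-exception responses below (sketch only, values
 * derived from the three assignments above):
 *
 *      RTFLOAT80U r80;
 *      iemFpuStoreQNan(&r80);
 *      // r80: sign = 1, exponent = 0x7fff, mantissa = 0xc000000000000000,
 *      //      i.e. the 80-bit pattern ffff c0000000 00000000.
 */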
6499
6500
6501/**
6502 * Updates the FOP, FPU.CS and FPUIP registers.
6503 *
6504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6505 * @param pCtx The CPU context.
6506 * @param pFpuCtx The FPU context.
6507 */
6508DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6509{
6510 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6511 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6512 /** @todo x87.CS and FPUIP need to be kept separately. */
6513 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6514 {
6515 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
6516 * handled in real mode, based on the fnsave and fnstenv images. */
6517 pFpuCtx->CS = 0;
6518 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6519 }
6520 else
6521 {
6522 pFpuCtx->CS = pCtx->cs.Sel;
6523 pFpuCtx->FPUIP = pCtx->rip;
6524 }
6525}
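
/*
 * Worked example of the real/V86-mode branch above (illustration only, made-up
 * values): with CS=0x1234 and EIP=0x0010 the stored image becomes
 *
 *      FPUIP = 0x0010 | (0x1234 << 4) = 0x00012350
 *
 * which is (roughly) the linear address, matching what fnsave/fnstenv images
 * contain in real mode.
 */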
6526
6527
6528/**
6529 * Updates the x87.DS and FPUDP registers.
6530 *
6531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6532 * @param pCtx The CPU context.
6533 * @param pFpuCtx The FPU context.
6534 * @param iEffSeg The effective segment register.
6535 * @param GCPtrEff The effective address relative to @a iEffSeg.
6536 */
6537DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6538{
6539 RTSEL sel;
6540 switch (iEffSeg)
6541 {
6542 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6543 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6544 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6545 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6546 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6547 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6548 default:
6549 AssertMsgFailed(("%d\n", iEffSeg));
6550 sel = pCtx->ds.Sel;
6551 }
6552 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6553 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6554 {
6555 pFpuCtx->DS = 0;
6556 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6557 }
6558 else
6559 {
6560 pFpuCtx->DS = sel;
6561 pFpuCtx->FPUDP = GCPtrEff;
6562 }
6563}
6564
6565
6566/**
6567 * Rotates the stack registers in the push direction.
6568 *
6569 * @param pFpuCtx The FPU context.
6570 * @remarks This is a complete waste of time, but fxsave stores the registers in
6571 * stack order.
6572 */
6573DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6574{
6575 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6576 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6577 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6578 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6579 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6580 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6581 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6582 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6583 pFpuCtx->aRegs[0].r80 = r80Tmp;
6584}
6585
6586
6587/**
6588 * Rotates the stack registers in the pop direction.
6589 *
6590 * @param pFpuCtx The FPU context.
6591 * @remarks This is a complete waste of time, but fxsave stores the registers in
6592 * stack order.
6593 */
6594DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6595{
6596 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6597 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6598 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6599 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6600 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6601 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6602 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6603 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6604 pFpuCtx->aRegs[7].r80 = r80Tmp;
6605}
6606
6607
6608/**
6609 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6610 * exception prevents it.
6611 *
6612 * @param pResult The FPU operation result to push.
6613 * @param pFpuCtx The FPU context.
6614 */
6615IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6616{
6617 /* Update FSW and bail if there are pending exceptions afterwards. */
6618 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6619 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6620 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6621 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6622 {
6623 pFpuCtx->FSW = fFsw;
6624 return;
6625 }
6626
6627 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6628 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6629 {
6630 /* All is fine, push the actual value. */
6631 pFpuCtx->FTW |= RT_BIT(iNewTop);
6632 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6633 }
6634 else if (pFpuCtx->FCW & X86_FCW_IM)
6635 {
6636 /* Masked stack overflow, push QNaN. */
6637 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6638 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6639 }
6640 else
6641 {
6642 /* Raise stack overflow, don't push anything. */
6643 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6644 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6645 return;
6646 }
6647
6648 fFsw &= ~X86_FSW_TOP_MASK;
6649 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6650 pFpuCtx->FSW = fFsw;
6651
6652 iemFpuRotateStackPush(pFpuCtx);
6653}
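
/*
 * Note on the TOP arithmetic used above and in the other push paths: adding 7
 * and masking with X86_FSW_TOP_SMASK (the 3-bit TOP field) is simply a
 * borrow-free decrement modulo 8, e.g.
 *
 *      TOP = 0:  (0 + 7) & 7 = 7    // the push wraps around to register 7
 *      TOP = 3:  (3 + 7) & 7 = 2
 */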
6654
6655
6656/**
6657 * Stores a result in a FPU register and updates the FSW and FTW.
6658 *
6659 * @param pFpuCtx The FPU context.
6660 * @param pResult The result to store.
6661 * @param iStReg Which FPU register to store it in.
6662 */
6663IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6664{
6665 Assert(iStReg < 8);
6666 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6667 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6668 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6669 pFpuCtx->FTW |= RT_BIT(iReg);
6670 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6671}
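
/*
 * The ST(i) -> physical register mapping used above and by most of the
 * helpers below (illustration only):
 *
 *      iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
 *      // e.g. TOP = 5:  ST(0) -> reg 5, ST(1) -> reg 6, ST(3) -> reg 0
 */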
6672
6673
6674/**
6675 * Only updates the FPU status word (FSW) with the result of the current
6676 * instruction.
6677 *
6678 * @param pFpuCtx The FPU context.
6679 * @param u16FSW The FSW output of the current instruction.
6680 */
6681IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6682{
6683 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6684 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6685}
6686
6687
6688/**
6689 * Pops one item off the FPU stack if no pending exception prevents it.
6690 *
6691 * @param pFpuCtx The FPU context.
6692 */
6693IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6694{
6695 /* Check pending exceptions. */
6696 uint16_t uFSW = pFpuCtx->FSW;
6697 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6698 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6699 return;
6700
6701 /* TOP--. */
6702 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6703 uFSW &= ~X86_FSW_TOP_MASK;
6704 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6705 pFpuCtx->FSW = uFSW;
6706
6707 /* Mark the previous ST0 as empty. */
6708 iOldTop >>= X86_FSW_TOP_SHIFT;
6709 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6710
6711 /* Rotate the registers. */
6712 iemFpuRotateStackPop(pFpuCtx);
6713}
6714
6715
6716/**
6717 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6718 *
6719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6720 * @param pResult The FPU operation result to push.
6721 */
6722IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6723{
6724 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6725 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6726 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6727 iemFpuMaybePushResult(pResult, pFpuCtx);
6728}
6729
6730
6731/**
6732 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6733 * and sets FPUDP and FPUDS.
6734 *
6735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6736 * @param pResult The FPU operation result to push.
6737 * @param iEffSeg The effective segment register.
6738 * @param GCPtrEff The effective address relative to @a iEffSeg.
6739 */
6740IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6741{
6742 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6743 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6744 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6745 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6746 iemFpuMaybePushResult(pResult, pFpuCtx);
6747}
6748
6749
6750/**
6751 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6752 * unless a pending exception prevents it.
6753 *
6754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6755 * @param pResult The FPU operation result to store and push.
6756 */
6757IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6758{
6759 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6760 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6761 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6762
6763 /* Update FSW and bail if there are pending exceptions afterwards. */
6764 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6765 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6766 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6767 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6768 {
6769 pFpuCtx->FSW = fFsw;
6770 return;
6771 }
6772
6773 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6774 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6775 {
6776 /* All is fine, push the actual value. */
6777 pFpuCtx->FTW |= RT_BIT(iNewTop);
6778 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6779 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6780 }
6781 else if (pFpuCtx->FCW & X86_FCW_IM)
6782 {
6783 /* Masked stack overflow, push QNaN. */
6784 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6785 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6786 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6787 }
6788 else
6789 {
6790 /* Raise stack overflow, don't push anything. */
6791 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6792 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6793 return;
6794 }
6795
6796 fFsw &= ~X86_FSW_TOP_MASK;
6797 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6798 pFpuCtx->FSW = fFsw;
6799
6800 iemFpuRotateStackPush(pFpuCtx);
6801}
6802
6803
6804/**
6805 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6806 * FOP.
6807 *
6808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6809 * @param pResult The result to store.
6810 * @param iStReg Which FPU register to store it in.
6811 */
6812IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6813{
6814 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6815 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6816 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6817 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6818}
6819
6820
6821/**
6822 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6823 * FOP, and then pops the stack.
6824 *
6825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6826 * @param pResult The result to store.
6827 * @param iStReg Which FPU register to store it in.
6828 */
6829IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6830{
6831 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6832 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6833 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6834 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6835 iemFpuMaybePopOne(pFpuCtx);
6836}
6837
6838
6839/**
6840 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6841 * FPUDP, and FPUDS.
6842 *
6843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6844 * @param pResult The result to store.
6845 * @param iStReg Which FPU register to store it in.
6846 * @param iEffSeg The effective memory operand selector register.
6847 * @param GCPtrEff The effective memory operand offset.
6848 */
6849IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6850 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6851{
6852 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6853 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6854 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6855 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6856 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6857}
6858
6859
6860/**
6861 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6862 * FPUDP, and FPUDS, and then pops the stack.
6863 *
6864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6865 * @param pResult The result to store.
6866 * @param iStReg Which FPU register to store it in.
6867 * @param iEffSeg The effective memory operand selector register.
6868 * @param GCPtrEff The effective memory operand offset.
6869 */
6870IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6871 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6872{
6873 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6874 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6875 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6876 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6877 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6878 iemFpuMaybePopOne(pFpuCtx);
6879}
6880
6881
6882/**
6883 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6884 *
6885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6886 */
6887IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6888{
6889 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6890 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6891 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6892}
6893
6894
6895/**
6896 * Marks the specified stack register as free (for FFREE).
6897 *
6898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6899 * @param iStReg The register to free.
6900 */
6901IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6902{
6903 Assert(iStReg < 8);
6904 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6905 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6906 pFpuCtx->FTW &= ~RT_BIT(iReg);
6907}
6908
6909
6910/**
6911 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6912 *
6913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6914 */
6915IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6916{
6917 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6918 uint16_t uFsw = pFpuCtx->FSW;
6919 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6920 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6921 uFsw &= ~X86_FSW_TOP_MASK;
6922 uFsw |= uTop;
6923 pFpuCtx->FSW = uFsw;
6924}
6925
6926
6927/**
6928 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6929 *
6930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6931 */
6932IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6933{
6934 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6935 uint16_t uFsw = pFpuCtx->FSW;
6936 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6937 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6938 uFsw &= ~X86_FSW_TOP_MASK;
6939 uFsw |= uTop;
6940 pFpuCtx->FSW = uFsw;
6941}
6942
6943
6944/**
6945 * Updates the FSW, FOP, FPUIP, and FPUCS.
6946 *
6947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6948 * @param u16FSW The FSW from the current instruction.
6949 */
6950IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6951{
6952 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6953 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6954 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6955 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6956}
6957
6958
6959/**
6960 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6961 *
6962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6963 * @param u16FSW The FSW from the current instruction.
6964 */
6965IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6966{
6967 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6968 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6969 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6970 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6971 iemFpuMaybePopOne(pFpuCtx);
6972}
6973
6974
6975/**
6976 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6977 *
6978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6979 * @param u16FSW The FSW from the current instruction.
6980 * @param iEffSeg The effective memory operand selector register.
6981 * @param GCPtrEff The effective memory operand offset.
6982 */
6983IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6984{
6985 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6986 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6987 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6988 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6989 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6990}
6991
6992
6993/**
6994 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
6995 *
6996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6997 * @param u16FSW The FSW from the current instruction.
6998 */
6999IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7000{
7001 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7002 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7003 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7004 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7005 iemFpuMaybePopOne(pFpuCtx);
7006 iemFpuMaybePopOne(pFpuCtx);
7007}
7008
7009
7010/**
7011 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7012 *
7013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7014 * @param u16FSW The FSW from the current instruction.
7015 * @param iEffSeg The effective memory operand selector register.
7016 * @param GCPtrEff The effective memory operand offset.
7017 */
7018IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7019{
7020 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7021 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7022 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7023 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7024 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7025 iemFpuMaybePopOne(pFpuCtx);
7026}
7027
7028
7029/**
7030 * Worker routine for raising an FPU stack underflow exception.
7031 *
7032 * @param pFpuCtx The FPU context.
7033 * @param iStReg The stack register being accessed.
7034 */
7035IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7036{
7037 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7038 if (pFpuCtx->FCW & X86_FCW_IM)
7039 {
7040 /* Masked underflow. */
7041 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7042 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7043 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7044 if (iStReg != UINT8_MAX)
7045 {
7046 pFpuCtx->FTW |= RT_BIT(iReg);
7047 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7048 }
7049 }
7050 else
7051 {
7052 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7053 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7054 }
7055}
7056
7057
7058/**
7059 * Raises a FPU stack underflow exception.
7060 *
7061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7062 * @param iStReg The destination register that should be loaded
7063 * with QNaN if \#IS is not masked. Specify
7064 * UINT8_MAX if none (like for fcom).
7065 */
7066DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7067{
7068 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7069 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7070 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7071 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7072}
7073
7074
7075DECL_NO_INLINE(IEM_STATIC, void)
7076iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7077{
7078 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7079 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7080 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7081 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7082 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7083}
7084
7085
7086DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7087{
7088 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7089 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7090 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7091 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7092 iemFpuMaybePopOne(pFpuCtx);
7093}
7094
7095
7096DECL_NO_INLINE(IEM_STATIC, void)
7097iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7098{
7099 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7100 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7101 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7102 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7103 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7104 iemFpuMaybePopOne(pFpuCtx);
7105}
7106
7107
7108DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7109{
7110 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7111 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7112 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7113 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7114 iemFpuMaybePopOne(pFpuCtx);
7115 iemFpuMaybePopOne(pFpuCtx);
7116}
7117
7118
7119DECL_NO_INLINE(IEM_STATIC, void)
7120iemFpuStackPushUnderflow(PVMCPU pVCpu)
7121{
7122 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7123 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7124 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7125
7126 if (pFpuCtx->FCW & X86_FCW_IM)
7127 {
7128 /* Masked stack underflow - push QNaN. */
7129 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7130 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7131 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7132 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7133 pFpuCtx->FTW |= RT_BIT(iNewTop);
7134 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7135 iemFpuRotateStackPush(pFpuCtx);
7136 }
7137 else
7138 {
7139 /* Exception pending - don't change TOP or the register stack. */
7140 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7141 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7142 }
7143}
7144
7145
7146DECL_NO_INLINE(IEM_STATIC, void)
7147iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7148{
7149 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7150 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7151 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7152
7153 if (pFpuCtx->FCW & X86_FCW_IM)
7154 {
7155 /* Masked stack underflow - push QNaN. */
7156 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7157 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7158 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7159 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7160 pFpuCtx->FTW |= RT_BIT(iNewTop);
7161 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7162 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7163 iemFpuRotateStackPush(pFpuCtx);
7164 }
7165 else
7166 {
7167 /* Exception pending - don't change TOP or the register stack. */
7168 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7169 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7170 }
7171}
7172
7173
7174/**
7175 * Worker routine for raising an FPU stack overflow exception on a push.
7176 *
7177 * @param pFpuCtx The FPU context.
7178 */
7179IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7180{
7181 if (pFpuCtx->FCW & X86_FCW_IM)
7182 {
7183 /* Masked overflow. */
7184 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7185 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7186 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7187 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7188 pFpuCtx->FTW |= RT_BIT(iNewTop);
7189 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7190 iemFpuRotateStackPush(pFpuCtx);
7191 }
7192 else
7193 {
7194 /* Exception pending - don't change TOP or the register stack. */
7195 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7196 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7197 }
7198}
7199
7200
7201/**
7202 * Raises a FPU stack overflow exception on a push.
7203 *
7204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7205 */
7206DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7207{
7208 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7209 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7210 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7211 iemFpuStackPushOverflowOnly(pFpuCtx);
7212}
7213
7214
7215/**
7216 * Raises a FPU stack overflow exception on a push with a memory operand.
7217 *
7218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7219 * @param iEffSeg The effective memory operand selector register.
7220 * @param GCPtrEff The effective memory operand offset.
7221 */
7222DECL_NO_INLINE(IEM_STATIC, void)
7223iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7224{
7225 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7226 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7227 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7228 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7229 iemFpuStackPushOverflowOnly(pFpuCtx);
7230}
7231
7232
7233IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7234{
7235 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7236 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7237 if (pFpuCtx->FTW & RT_BIT(iReg))
7238 return VINF_SUCCESS;
7239 return VERR_NOT_FOUND;
7240}
7241
7242
7243IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7244{
7245 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7246 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7247 if (pFpuCtx->FTW & RT_BIT(iReg))
7248 {
7249 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7250 return VINF_SUCCESS;
7251 }
7252 return VERR_NOT_FOUND;
7253}
7254
7255
7256IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7257 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7258{
7259 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7260 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7261 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7262 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7263 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7264 {
7265 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7266 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7267 return VINF_SUCCESS;
7268 }
7269 return VERR_NOT_FOUND;
7270}
7271
7272
7273IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7274{
7275 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7276 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7277 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7278 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7279 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7280 {
7281 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7282 return VINF_SUCCESS;
7283 }
7284 return VERR_NOT_FOUND;
7285}
7286
7287
7288/**
7289 * Updates the FPU exception status after FCW is changed.
7290 *
7291 * @param pFpuCtx The FPU context.
7292 */
7293IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7294{
7295 uint16_t u16Fsw = pFpuCtx->FSW;
7296 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7297 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7298 else
7299 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7300 pFpuCtx->FSW = u16Fsw;
7301}
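
/*
 * Minimal sketch of what the helper above is for: after the guest unmasks an
 * exception that is already pending in FSW (e.g. via FLDCW), the ES and B
 * summary bits must be raised so that a subsequent WAIT/FWAIT or waiting FPU
 * instruction delivers the exception.
 *
 *      pFpuCtx->FSW |= X86_FSW_ZE;            // #Z pending
 *      pFpuCtx->FCW &= ~X86_FCW_ZM;           // ... and now unmasked
 *      iemFpuRecalcExceptionStatus(pFpuCtx);
 *      Assert(pFpuCtx->FSW & X86_FSW_ES);     // summary bit set
 */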
7302
7303
7304/**
7305 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7306 *
7307 * @returns The full FTW.
7308 * @param pFpuCtx The FPU context.
7309 */
7310IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7311{
7312 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7313 uint16_t u16Ftw = 0;
7314 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7315 for (unsigned iSt = 0; iSt < 8; iSt++)
7316 {
7317 unsigned const iReg = (iSt + iTop) & 7;
7318 if (!(u8Ftw & RT_BIT(iReg)))
7319 u16Ftw |= 3 << (iReg * 2); /* empty */
7320 else
7321 {
7322 uint16_t uTag;
7323 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7324 if (pr80Reg->s.uExponent == 0x7fff)
7325 uTag = 2; /* Exponent is all 1's => Special. */
7326 else if (pr80Reg->s.uExponent == 0x0000)
7327 {
7328 if (pr80Reg->s.u64Mantissa == 0x0000)
7329 uTag = 1; /* All bits are zero => Zero. */
7330 else
7331 uTag = 2; /* Must be special. */
7332 }
7333 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7334 uTag = 0; /* Valid. */
7335 else
7336 uTag = 2; /* Must be special. */
7337
7338 u16Ftw |= uTag << (iReg * 2);
7339 }
7340 }
7341
7342 return u16Ftw;
7343}
7344
7345
7346/**
7347 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7348 *
7349 * @returns The compressed FTW.
7350 * @param u16FullFtw The full FTW to convert.
7351 */
7352IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7353{
7354 uint8_t u8Ftw = 0;
7355 for (unsigned i = 0; i < 8; i++)
7356 {
7357 if ((u16FullFtw & 3) != 3 /*empty*/)
7358 u8Ftw |= RT_BIT(i);
7359 u16FullFtw >>= 2;
7360 }
7361
7362 return u8Ftw;
7363}
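
/*
 * Example of the two tag word encodings handled by the pair of helpers above
 * (illustration only): TOP=6 with ST(0) and ST(1) valid and everything else
 * empty gives
 *
 *      full FTW (2 bits/reg):       regs 6+7 = 00 (valid), regs 0..5 = 11 (empty)  =>  0x0fff
 *      compressed FTW (1 bit/reg):  bits 6 and 7 set                               =>  0xc0
 */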
7364
7365/** @} */
7366
7367
7368/** @name Memory access.
7369 *
7370 * @{
7371 */
7372
7373
7374/**
7375 * Updates the IEMCPU::cbWritten counter if applicable.
7376 *
7377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7378 * @param fAccess The access being accounted for.
7379 * @param cbMem The access size.
7380 */
7381DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7382{
7383 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7384 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7385 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7386}
7387
7388
7389/**
7390 * Checks if the given segment can be written to, raising the appropriate
7391 * exception if not.
7392 *
7393 * @returns VBox strict status code.
7394 *
7395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7396 * @param pHid Pointer to the hidden register.
7397 * @param iSegReg The register number.
7398 * @param pu64BaseAddr Where to return the base address to use for the
7399 * segment. (In 64-bit code it may differ from the
7400 * base in the hidden segment.)
7401 */
7402IEM_STATIC VBOXSTRICTRC
7403iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7404{
7405 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7406 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7407 else
7408 {
7409 if (!pHid->Attr.n.u1Present)
7410 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7411
7412 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7413 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7414 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7415 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7416 *pu64BaseAddr = pHid->u64Base;
7417 }
7418 return VINF_SUCCESS;
7419}
7420
7421
7422/**
7423 * Checks if the given segment can be read from, raising the appropriate
7424 * exception if not.
7425 *
7426 * @returns VBox strict status code.
7427 *
7428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7429 * @param pHid Pointer to the hidden register.
7430 * @param iSegReg The register number.
7431 * @param pu64BaseAddr Where to return the base address to use for the
7432 * segment. (In 64-bit code it may differ from the
7433 * base in the hidden segment.)
7434 */
7435IEM_STATIC VBOXSTRICTRC
7436iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7437{
7438 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7439 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7440 else
7441 {
7442 if (!pHid->Attr.n.u1Present)
7443 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7444
7445 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7446 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7447 *pu64BaseAddr = pHid->u64Base;
7448 }
7449 return VINF_SUCCESS;
7450}
7451
7452
7453/**
7454 * Applies the segment limit, base and attributes.
7455 *
7456 * This may raise a \#GP or \#SS.
7457 *
7458 * @returns VBox strict status code.
7459 *
7460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7461 * @param fAccess The kind of access which is being performed.
7462 * @param iSegReg The index of the segment register to apply.
7463 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7464 * TSS, ++).
7465 * @param cbMem The access size.
7466 * @param pGCPtrMem Pointer to the guest memory address to apply
7467 * segmentation to. Input and output parameter.
7468 */
7469IEM_STATIC VBOXSTRICTRC
7470iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7471{
7472 if (iSegReg == UINT8_MAX)
7473 return VINF_SUCCESS;
7474
7475 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7476 switch (pVCpu->iem.s.enmCpuMode)
7477 {
7478 case IEMMODE_16BIT:
7479 case IEMMODE_32BIT:
7480 {
7481 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7482 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7483
7484 if ( pSel->Attr.n.u1Present
7485 && !pSel->Attr.n.u1Unusable)
7486 {
7487 Assert(pSel->Attr.n.u1DescType);
7488 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7489 {
7490 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7491 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7492 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7493
7494 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7495 {
7496 /** @todo CPL check. */
7497 }
7498
7499 /*
7500 * There are two kinds of data selectors, normal and expand down.
7501 */
7502 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7503 {
7504 if ( GCPtrFirst32 > pSel->u32Limit
7505 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7506 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7507 }
7508 else
7509 {
7510 /*
7511 * The upper boundary is defined by the B bit, not the G bit!
7512 */
7513 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7514 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7515 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7516 }
7517 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7518 }
7519 else
7520 {
7521
7522 /*
7523 * A code selector can usually be used to read through; writing is
7524 * only permitted in real and V8086 mode.
7525 */
7526 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7527 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7528 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7529 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7530 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7531
7532 if ( GCPtrFirst32 > pSel->u32Limit
7533 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7534 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7535
7536 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7537 {
7538 /** @todo CPL check. */
7539 }
7540
7541 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7542 }
7543 }
7544 else
7545 return iemRaiseGeneralProtectionFault0(pVCpu);
7546 return VINF_SUCCESS;
7547 }
7548
7549 case IEMMODE_64BIT:
7550 {
7551 RTGCPTR GCPtrMem = *pGCPtrMem;
7552 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7553 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7554
7555 Assert(cbMem >= 1);
7556 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7557 return VINF_SUCCESS;
7558 return iemRaiseGeneralProtectionFault0(pVCpu);
7559 }
7560
7561 default:
7562 AssertFailedReturn(VERR_IEM_IPE_7);
7563 }
7564}
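
/*
 * Worked example for the expand-down branch above (illustration only): a
 * 16-bit expand-down data segment (B=0) with limit 0x0fff accepts offsets
 * 0x1000..0xffff, so
 *
 *      GCPtrFirst32 = 0x2000, cbMem = 4  ->  OK (0x2000 >= limit+1, last byte 0x2003 <= 0xffff)
 *      GCPtrFirst32 = 0x0ffe, cbMem = 4  ->  iemRaiseSelectorBounds (first byte below limit+1)
 */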
7565
7566
7567/**
7568 * Translates a virtual address to a physical address and checks if we
7569 * can access the page as specified.
7570 *
7571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7572 * @param GCPtrMem The virtual address.
7573 * @param fAccess The intended access.
7574 * @param pGCPhysMem Where to return the physical address.
7575 */
7576IEM_STATIC VBOXSTRICTRC
7577iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7578{
7579 /** @todo Need a different PGM interface here. We're currently using
7580 * generic / REM interfaces. This won't cut it for R0 & RC. */
7581 RTGCPHYS GCPhys;
7582 uint64_t fFlags;
7583 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7584 if (RT_FAILURE(rc))
7585 {
7586 /** @todo Check unassigned memory in unpaged mode. */
7587 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7588 *pGCPhysMem = NIL_RTGCPHYS;
7589 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7590 }
7591
7592 /* If the page is writable and does not have the no-exec bit set, all
7593 access is allowed. Otherwise we'll have to check more carefully... */
7594 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7595 {
7596 /* Write to read only memory? */
7597 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7598 && !(fFlags & X86_PTE_RW)
7599 && ( pVCpu->iem.s.uCpl != 0
7600 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7601 {
7602 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7603 *pGCPhysMem = NIL_RTGCPHYS;
7604 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7605 }
7606
7607 /* Kernel memory accessed by userland? */
7608 if ( !(fFlags & X86_PTE_US)
7609 && pVCpu->iem.s.uCpl == 3
7610 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7611 {
7612 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7613 *pGCPhysMem = NIL_RTGCPHYS;
7614 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7615 }
7616
7617 /* Executing non-executable memory? */
7618 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7619 && (fFlags & X86_PTE_PAE_NX)
7620 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7621 {
7622 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7623 *pGCPhysMem = NIL_RTGCPHYS;
7624 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7625 VERR_ACCESS_DENIED);
7626 }
7627 }
7628
7629 /*
7630 * Set the dirty / access flags.
7631 * ASSUMES this is set when the address is translated rather than on commit...
7632 */
7633 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7634 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7635 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7636 {
7637 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7638 AssertRC(rc2);
7639 }
7640
7641 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7642 *pGCPhysMem = GCPhys;
7643 return VINF_SUCCESS;
7644}
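
/*
 * Summary of the read-only page check above (illustration only): a write to a
 * page with X86_PTE_RW clear results in
 *
 *      CPL != 0, any CR0.WP       ->  #PF
 *      CPL == 0, CR0.WP set       ->  #PF
 *      CPL == 0, CR0.WP clear     ->  write allowed (supervisor override)
 */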
7645
7646
7647
7648/**
7649 * Maps a physical page.
7650 *
7651 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7653 * @param GCPhysMem The physical address.
7654 * @param fAccess The intended access.
7655 * @param ppvMem Where to return the mapping address.
7656 * @param pLock The PGM lock.
7657 */
7658IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7659{
7660#ifdef IEM_VERIFICATION_MODE_FULL
7661 /* Force the alternative path so we can ignore writes. */
7662 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7663 {
7664 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7665 {
7666 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7667 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7668 if (RT_FAILURE(rc2))
7669 pVCpu->iem.s.fProblematicMemory = true;
7670 }
7671 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7672 }
7673#endif
7674#ifdef IEM_LOG_MEMORY_WRITES
7675 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7676 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7677#endif
7678#ifdef IEM_VERIFICATION_MODE_MINIMAL
7679 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7680#endif
7681
7682 /** @todo This API may require some improving later. A private deal with PGM
7683 * regarding locking and unlocking needs to be struck. A couple of TLBs
7684 * living in PGM, but with publicly accessible inlined access methods
7685 * could perhaps be an even better solution. */
7686 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7687 GCPhysMem,
7688 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7689 pVCpu->iem.s.fBypassHandlers,
7690 ppvMem,
7691 pLock);
7692 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7693 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7694
7695#ifdef IEM_VERIFICATION_MODE_FULL
7696 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7697 pVCpu->iem.s.fProblematicMemory = true;
7698#endif
7699 return rc;
7700}
7701
7702
7703/**
7704 * Unmaps a page previously mapped by iemMemPageMap.
7705 *
7706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7707 * @param GCPhysMem The physical address.
7708 * @param fAccess The intended access.
7709 * @param pvMem What iemMemPageMap returned.
7710 * @param pLock The PGM lock.
7711 */
7712DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7713{
7714 NOREF(pVCpu);
7715 NOREF(GCPhysMem);
7716 NOREF(fAccess);
7717 NOREF(pvMem);
7718 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7719}
7720
7721
7722/**
7723 * Looks up a memory mapping entry.
7724 *
7725 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7727 * @param pvMem The memory address.
7728 * @param fAccess The access flags to match.
7729 */
7730DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7731{
7732 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7733 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7734 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7735 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7736 return 0;
7737 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7738 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7739 return 1;
7740 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7741 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7742 return 2;
7743 return VERR_NOT_FOUND;
7744}
7745
7746
7747/**
7748 * Finds a free memmap entry when using iNextMapping doesn't work.
7749 *
7750 * @returns Memory mapping index, 1024 on failure.
7751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7752 */
7753IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7754{
7755 /*
7756 * The easy case.
7757 */
7758 if (pVCpu->iem.s.cActiveMappings == 0)
7759 {
7760 pVCpu->iem.s.iNextMapping = 1;
7761 return 0;
7762 }
7763
7764 /* There should be enough mappings for all instructions. */
7765 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7766
7767 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7768 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7769 return i;
7770
7771 AssertFailedReturn(1024);
7772}
7773
7774
7775/**
7776 * Commits a bounce buffer that needs writing back and unmaps it.
7777 *
7778 * @returns Strict VBox status code.
7779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7780 * @param iMemMap The index of the buffer to commit.
7781 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
7782 * Always false in ring-3, obviously.
7783 */
7784IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7785{
7786 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7787 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7788#ifdef IN_RING3
7789 Assert(!fPostponeFail);
7790 RT_NOREF_PV(fPostponeFail);
7791#endif
7792
7793 /*
7794 * Do the writing.
7795 */
7796#ifndef IEM_VERIFICATION_MODE_MINIMAL
7797 PVM pVM = pVCpu->CTX_SUFF(pVM);
7798 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7799 && !IEM_VERIFICATION_ENABLED(pVCpu))
7800 {
7801 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7802 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7803 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7804 if (!pVCpu->iem.s.fBypassHandlers)
7805 {
7806 /*
7807 * Carefully and efficiently dealing with access handler return
7808 * codes makes this a little bloated.
7809 */
7810 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7811 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7812 pbBuf,
7813 cbFirst,
7814 PGMACCESSORIGIN_IEM);
7815 if (rcStrict == VINF_SUCCESS)
7816 {
7817 if (cbSecond)
7818 {
7819 rcStrict = PGMPhysWrite(pVM,
7820 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7821 pbBuf + cbFirst,
7822 cbSecond,
7823 PGMACCESSORIGIN_IEM);
7824 if (rcStrict == VINF_SUCCESS)
7825 { /* nothing */ }
7826 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7827 {
7828 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7829 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7830 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7831 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7832 }
7833# ifndef IN_RING3
7834 else if (fPostponeFail)
7835 {
7836 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7837 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7838 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7839 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7840 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7841 return iemSetPassUpStatus(pVCpu, rcStrict);
7842 }
7843# endif
7844 else
7845 {
7846 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7849 return rcStrict;
7850 }
7851 }
7852 }
7853 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7854 {
7855 if (!cbSecond)
7856 {
7857 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7858 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7859 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7860 }
7861 else
7862 {
7863 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7865 pbBuf + cbFirst,
7866 cbSecond,
7867 PGMACCESSORIGIN_IEM);
7868 if (rcStrict2 == VINF_SUCCESS)
7869 {
7870 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7873 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7874 }
7875 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7876 {
7877 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7879 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7880 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7881 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7882 }
7883# ifndef IN_RING3
7884 else if (fPostponeFail)
7885 {
7886 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7889 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7890 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7891 return iemSetPassUpStatus(pVCpu, rcStrict);
7892 }
7893# endif
7894 else
7895 {
7896 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7897 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7898 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7899 return rcStrict2;
7900 }
7901 }
7902 }
7903# ifndef IN_RING3
7904 else if (fPostponeFail)
7905 {
7906 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7907 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7908 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7909 if (!cbSecond)
7910 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7911 else
7912 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7913 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7914 return iemSetPassUpStatus(pVCpu, rcStrict);
7915 }
7916# endif
7917 else
7918 {
7919 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7920 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7922 return rcStrict;
7923 }
7924 }
7925 else
7926 {
7927 /*
7928 * No access handlers, much simpler.
7929 */
7930 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7931 if (RT_SUCCESS(rc))
7932 {
7933 if (cbSecond)
7934 {
7935 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7936 if (RT_SUCCESS(rc))
7937 { /* likely */ }
7938 else
7939 {
7940 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7941 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7943 return rc;
7944 }
7945 }
7946 }
7947 else
7948 {
7949 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7950 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7951 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7952 return rc;
7953 }
7954 }
7955 }
7956#endif
7957
7958#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7959 /*
7960 * Record the write(s).
7961 */
7962 if (!pVCpu->iem.s.fNoRem)
7963 {
7964 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7965 if (pEvtRec)
7966 {
7967 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7968 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7969 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7970 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7971 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7972 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7973 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7974 }
7975 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7976 {
7977 pEvtRec = iemVerifyAllocRecord(pVCpu);
7978 if (pEvtRec)
7979 {
7980 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7981 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7982 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7983 memcpy(pEvtRec->u.RamWrite.ab,
7984 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7985 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7986 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7987 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7988 }
7989 }
7990 }
7991#endif
7992#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
7993 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7994 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
7995 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7996 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7997 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
7998 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
7999
8000 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8001 g_cbIemWrote = cbWrote;
8002 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8003#endif
8004
8005 /*
8006 * Free the mapping entry.
8007 */
8008 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8009 Assert(pVCpu->iem.s.cActiveMappings != 0);
8010 pVCpu->iem.s.cActiveMappings--;
8011 return VINF_SUCCESS;
8012}
8013
8014
8015/**
8016 * iemMemMap worker that deals with a request crossing pages.
8017 */
8018IEM_STATIC VBOXSTRICTRC
8019iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8020{
8021 /*
8022 * Do the address translations.
8023 */
8024 RTGCPHYS GCPhysFirst;
8025 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8026 if (rcStrict != VINF_SUCCESS)
8027 return rcStrict;
8028
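   /* The access crosses a page boundary, so translate the start of the second page as well (the page holding the last byte of the request). */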
8029 RTGCPHYS GCPhysSecond;
8030 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8031 fAccess, &GCPhysSecond);
8032 if (rcStrict != VINF_SUCCESS)
8033 return rcStrict;
8034 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8035
8036 PVM pVM = pVCpu->CTX_SUFF(pVM);
8037#ifdef IEM_VERIFICATION_MODE_FULL
8038 /*
8039 * Detect problematic memory when verifying so we can select
8040 * the right execution engine. (TLB: Redo this.)
8041 */
8042 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8043 {
8044 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8045 if (RT_SUCCESS(rc2))
8046 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8047 if (RT_FAILURE(rc2))
8048 pVCpu->iem.s.fProblematicMemory = true;
8049 }
8050#endif
8051
8052
8053 /*
8054 * Read in the current memory content if it's a read, execute or partial
8055 * write access.
8056 */
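   /* The per-entry bounce buffer doubles as the mapping handed back to the caller; the request is split at the page boundary. */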
8057 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8058 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8059 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8060
8061 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8062 {
8063 if (!pVCpu->iem.s.fBypassHandlers)
8064 {
8065 /*
8066 * Must carefully deal with access handler status codes here,
8067 * makes the code a bit bloated.
8068 */
8069 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8070 if (rcStrict == VINF_SUCCESS)
8071 {
8072 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8073 if (rcStrict == VINF_SUCCESS)
8074 { /*likely */ }
8075 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8076 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8077 else
8078 {
 8079                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
 8080                     GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8081 return rcStrict;
8082 }
8083 }
8084 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8085 {
8086 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8087 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8088 {
8089 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8090 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8091 }
8092 else
8093 {
 8094                   Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
 8095                        GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8096 return rcStrict2;
8097 }
8098 }
8099 else
8100 {
 8101             Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8102 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8103 return rcStrict;
8104 }
8105 }
8106 else
8107 {
8108 /*
 8109           * No informational status codes here, much more straightforward.
8110 */
8111 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8112 if (RT_SUCCESS(rc))
8113 {
8114 Assert(rc == VINF_SUCCESS);
8115 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8116 if (RT_SUCCESS(rc))
8117 Assert(rc == VINF_SUCCESS);
8118 else
8119 {
 8120               Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8121 return rc;
8122 }
8123 }
8124 else
8125 {
 8126            Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8127 return rc;
8128 }
8129 }
8130
8131#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8132 if ( !pVCpu->iem.s.fNoRem
8133 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8134 {
8135 /*
8136 * Record the reads.
8137 */
8138 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8139 if (pEvtRec)
8140 {
8141 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8142 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8143 pEvtRec->u.RamRead.cb = cbFirstPage;
8144 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8145 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8146 }
8147 pEvtRec = iemVerifyAllocRecord(pVCpu);
8148 if (pEvtRec)
8149 {
8150 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8151 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8152 pEvtRec->u.RamRead.cb = cbSecondPage;
8153 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8154 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8155 }
8156 }
8157#endif
8158 }
8159#ifdef VBOX_STRICT
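   /* Strict builds: poison the buffer so stray reads stand out (0xcc for write-only mappings that skip the read-in above, 0xaa for the unused tail beyond cbMem). */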
8160 else
8161 memset(pbBuf, 0xcc, cbMem);
8162 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8163 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8164#endif
8165
8166 /*
8167 * Commit the bounce buffer entry.
8168 */
8169 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8170 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8171 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8172 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8173 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8174 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8175 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8176 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8177 pVCpu->iem.s.cActiveMappings++;
8178
8179 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8180 *ppvMem = pbBuf;
8181 return VINF_SUCCESS;
8182}
8183
8184
8185/**
 8186 * iemMemMap worker that deals with iemMemPageMap failures.
8187 */
8188IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8189 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8190{
8191 /*
8192 * Filter out conditions we can handle and the ones which shouldn't happen.
8193 */
8194 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8195 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8196 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8197 {
8198 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8199 return rcMap;
8200 }
8201 pVCpu->iem.s.cPotentialExits++;
8202
8203 /*
8204 * Read in the current memory content if it's a read, execute or partial
8205 * write access.
8206 */
8207 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8208 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8209 {
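      /* No backing page: fake the read by filling the buffer with 0xff, the value reads from unassigned physical memory return. */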
8210 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8211 memset(pbBuf, 0xff, cbMem);
8212 else
8213 {
8214 int rc;
8215 if (!pVCpu->iem.s.fBypassHandlers)
8216 {
8217 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8218 if (rcStrict == VINF_SUCCESS)
8219 { /* nothing */ }
8220 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8221 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8222 else
8223 {
8224 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8225 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8226 return rcStrict;
8227 }
8228 }
8229 else
8230 {
8231 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8232 if (RT_SUCCESS(rc))
8233 { /* likely */ }
8234 else
8235 {
 8236               Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
 8237                    GCPhysFirst, rc));
8238 return rc;
8239 }
8240 }
8241 }
8242
8243#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8244 if ( !pVCpu->iem.s.fNoRem
8245 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8246 {
8247 /*
8248 * Record the read.
8249 */
8250 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8251 if (pEvtRec)
8252 {
8253 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8254 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8255 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8256 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8257 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8258 }
8259 }
8260#endif
8261 }
 8262#ifdef VBOX_STRICT
 8263   else
 8264      memset(pbBuf, 0xcc, cbMem);
 8267   if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
 8268      memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
 8269#endif
8270
8271 /*
8272 * Commit the bounce buffer entry.
8273 */
8274 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8275 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8276 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8277 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8278 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8279 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8280 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8281 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8282 pVCpu->iem.s.cActiveMappings++;
8283
8284 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8285 *ppvMem = pbBuf;
8286 return VINF_SUCCESS;
8287}
8288
8289
8290
8291/**
8292 * Maps the specified guest memory for the given kind of access.
8293 *
 8294 * This may use bounce buffering of the memory if it's crossing a page
 8295 * boundary or if there is an access handler installed for any part of it. Because
8296 * of lock prefix guarantees, we're in for some extra clutter when this
8297 * happens.
8298 *
8299 * This may raise a \#GP, \#SS, \#PF or \#AC.
8300 *
8301 * @returns VBox strict status code.
8302 *
8303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8304 * @param ppvMem Where to return the pointer to the mapped
8305 * memory.
8306 * @param cbMem The number of bytes to map. This is usually 1,
8307 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8308 * string operations it can be up to a page.
8309 * @param iSegReg The index of the segment register to use for
8310 * this access. The base and limits are checked.
8311 * Use UINT8_MAX to indicate that no segmentation
8312 * is required (for IDT, GDT and LDT accesses).
8313 * @param GCPtrMem The address of the guest memory.
8314 * @param fAccess How the memory is being accessed. The
8315 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8316 * how to map the memory, while the
8317 * IEM_ACCESS_WHAT_XXX bit is used when raising
8318 * exceptions.
8319 */
8320IEM_STATIC VBOXSTRICTRC
8321iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8322{
8323 /*
8324 * Check the input and figure out which mapping entry to use.
8325 */
8326 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8327 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8328 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8329
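   /* Prefer the next mapping slot; if it is out of range or already in use, go searching for a free one. */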
8330 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8331 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8332 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8333 {
8334 iMemMap = iemMemMapFindFree(pVCpu);
8335 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8336 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8337 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8338 pVCpu->iem.s.aMemMappings[2].fAccess),
8339 VERR_IEM_IPE_9);
8340 }
8341
8342 /*
8343 * Map the memory, checking that we can actually access it. If something
8344 * slightly complicated happens, fall back on bounce buffering.
8345 */
8346 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8347 if (rcStrict != VINF_SUCCESS)
8348 return rcStrict;
8349
8350 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8351 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8352
8353 RTGCPHYS GCPhysFirst;
8354 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8355 if (rcStrict != VINF_SUCCESS)
8356 return rcStrict;
8357
8358 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8359 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8360 if (fAccess & IEM_ACCESS_TYPE_READ)
8361 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8362
8363 void *pvMem;
8364 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8365 if (rcStrict != VINF_SUCCESS)
8366 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8367
8368 /*
8369 * Fill in the mapping table entry.
8370 */
8371 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8372 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8373 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8374 pVCpu->iem.s.cActiveMappings++;
8375
8376 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8377 *ppvMem = pvMem;
8378 return VINF_SUCCESS;
8379}
8380
8381
8382/**
8383 * Commits the guest memory if bounce buffered and unmaps it.
8384 *
8385 * @returns Strict VBox status code.
8386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8387 * @param pvMem The mapping.
8388 * @param fAccess The kind of access.
8389 */
8390IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8391{
8392 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8393 AssertReturn(iMemMap >= 0, iMemMap);
8394
8395 /* If it's bounce buffered, we may need to write back the buffer. */
8396 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8397 {
8398 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8399 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8400 }
8401 /* Otherwise unlock it. */
8402 else
8403 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8404
8405 /* Free the entry. */
8406 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8407 Assert(pVCpu->iem.s.cActiveMappings != 0);
8408 pVCpu->iem.s.cActiveMappings--;
8409 return VINF_SUCCESS;
8410}
8411
8412#ifdef IEM_WITH_SETJMP
8413
8414/**
8415 * Maps the specified guest memory for the given kind of access, longjmp on
8416 * error.
8417 *
 8418 * This may use bounce buffering of the memory if it's crossing a page
 8419 * boundary or if there is an access handler installed for any part of it. Because
8420 * of lock prefix guarantees, we're in for some extra clutter when this
8421 * happens.
8422 *
8423 * This may raise a \#GP, \#SS, \#PF or \#AC.
8424 *
8425 * @returns Pointer to the mapped memory.
8426 *
8427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8428 * @param cbMem The number of bytes to map. This is usually 1,
8429 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8430 * string operations it can be up to a page.
8431 * @param iSegReg The index of the segment register to use for
8432 * this access. The base and limits are checked.
8433 * Use UINT8_MAX to indicate that no segmentation
8434 * is required (for IDT, GDT and LDT accesses).
8435 * @param GCPtrMem The address of the guest memory.
8436 * @param fAccess How the memory is being accessed. The
8437 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8438 * how to map the memory, while the
8439 * IEM_ACCESS_WHAT_XXX bit is used when raising
8440 * exceptions.
8441 */
8442IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8443{
8444 /*
8445 * Check the input and figure out which mapping entry to use.
8446 */
8447 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8448 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8449 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8450
8451 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8452 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8453 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8454 {
8455 iMemMap = iemMemMapFindFree(pVCpu);
8456 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8457 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8458 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8459 pVCpu->iem.s.aMemMappings[2].fAccess),
8460 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8461 }
8462
8463 /*
8464 * Map the memory, checking that we can actually access it. If something
8465 * slightly complicated happens, fall back on bounce buffering.
8466 */
8467 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8468 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8469 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8470
8471 /* Crossing a page boundary? */
8472 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8473 { /* No (likely). */ }
8474 else
8475 {
8476 void *pvMem;
8477 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8478 if (rcStrict == VINF_SUCCESS)
8479 return pvMem;
8480 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8481 }
8482
8483 RTGCPHYS GCPhysFirst;
8484 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8485 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8486 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8487
8488 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8489 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8490 if (fAccess & IEM_ACCESS_TYPE_READ)
8491 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8492
8493 void *pvMem;
8494 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8495 if (rcStrict == VINF_SUCCESS)
8496 { /* likely */ }
8497 else
8498 {
8499 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8500 if (rcStrict == VINF_SUCCESS)
8501 return pvMem;
8502 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8503 }
8504
8505 /*
8506 * Fill in the mapping table entry.
8507 */
8508 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8509 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8510 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8511 pVCpu->iem.s.cActiveMappings++;
8512
8513 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8514 return pvMem;
8515}
8516
8517
8518/**
8519 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8520 *
8521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8522 * @param pvMem The mapping.
8523 * @param fAccess The kind of access.
8524 */
8525IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8526{
8527 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8528 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8529
8530 /* If it's bounce buffered, we may need to write back the buffer. */
8531 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8532 {
8533 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8534 {
8535 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8536 if (rcStrict == VINF_SUCCESS)
8537 return;
8538 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8539 }
8540 }
8541 /* Otherwise unlock it. */
8542 else
8543 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8544
8545 /* Free the entry. */
8546 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8547 Assert(pVCpu->iem.s.cActiveMappings != 0);
8548 pVCpu->iem.s.cActiveMappings--;
8549}
8550
8551#endif
8552
8553#ifndef IN_RING3
8554/**
 8555 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 8556 * buffer part runs into trouble, the writes are postponed to ring-3 (sets VMCPU_FF_IEM and the pending-write flags).
8557 *
8558 * Allows the instruction to be completed and retired, while the IEM user will
8559 * return to ring-3 immediately afterwards and do the postponed writes there.
8560 *
8561 * @returns VBox status code (no strict statuses). Caller must check
8562 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8564 * @param pvMem The mapping.
8565 * @param fAccess The kind of access.
8566 */
8567IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8568{
8569 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8570 AssertReturn(iMemMap >= 0, iMemMap);
8571
8572 /* If it's bounce buffered, we may need to write back the buffer. */
8573 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8574 {
8575 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8576 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8577 }
8578 /* Otherwise unlock it. */
8579 else
8580 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8581
8582 /* Free the entry. */
8583 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8584 Assert(pVCpu->iem.s.cActiveMappings != 0);
8585 pVCpu->iem.s.cActiveMappings--;
8586 return VINF_SUCCESS;
8587}
8588#endif
8589
8590
8591/**
 8592 * Rolls back mappings, releasing page locks and such.
 8593 *
 8594 * The caller shall only call this after checking cActiveMappings.
 8595 *
8597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8598 */
8599IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8600{
8601 Assert(pVCpu->iem.s.cActiveMappings > 0);
8602
8603 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8604 while (iMemMap-- > 0)
8605 {
8606 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8607 if (fAccess != IEM_ACCESS_INVALID)
8608 {
8609 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8610 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8611 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8612 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8613 Assert(pVCpu->iem.s.cActiveMappings > 0);
8614 pVCpu->iem.s.cActiveMappings--;
8615 }
8616 }
8617}
8618
8619
8620/**
8621 * Fetches a data byte.
8622 *
8623 * @returns Strict VBox status code.
8624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8625 * @param pu8Dst Where to return the byte.
8626 * @param iSegReg The index of the segment register to use for
8627 * this access. The base and limits are checked.
8628 * @param GCPtrMem The address of the guest memory.
8629 */
8630IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8631{
8632 /* The lazy approach for now... */
8633 uint8_t const *pu8Src;
8634 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8635 if (rc == VINF_SUCCESS)
8636 {
8637 *pu8Dst = *pu8Src;
8638 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8639 }
8640 return rc;
8641}
8642
8643
8644#ifdef IEM_WITH_SETJMP
8645/**
8646 * Fetches a data byte, longjmp on error.
8647 *
8648 * @returns The byte.
8649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8650 * @param iSegReg The index of the segment register to use for
8651 * this access. The base and limits are checked.
8652 * @param GCPtrMem The address of the guest memory.
8653 */
8654DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8655{
8656 /* The lazy approach for now... */
8657 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8658 uint8_t const bRet = *pu8Src;
8659 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8660 return bRet;
8661}
8662#endif /* IEM_WITH_SETJMP */
8663
8664
8665/**
8666 * Fetches a data word.
8667 *
8668 * @returns Strict VBox status code.
8669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8670 * @param pu16Dst Where to return the word.
8671 * @param iSegReg The index of the segment register to use for
8672 * this access. The base and limits are checked.
8673 * @param GCPtrMem The address of the guest memory.
8674 */
8675IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8676{
8677 /* The lazy approach for now... */
8678 uint16_t const *pu16Src;
8679 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8680 if (rc == VINF_SUCCESS)
8681 {
8682 *pu16Dst = *pu16Src;
8683 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8684 }
8685 return rc;
8686}
8687
8688
8689#ifdef IEM_WITH_SETJMP
8690/**
8691 * Fetches a data word, longjmp on error.
8692 *
8693 * @returns The word
8694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8695 * @param iSegReg The index of the segment register to use for
8696 * this access. The base and limits are checked.
8697 * @param GCPtrMem The address of the guest memory.
8698 */
8699DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8700{
8701 /* The lazy approach for now... */
8702 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8703 uint16_t const u16Ret = *pu16Src;
8704 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8705 return u16Ret;
8706}
8707#endif
8708
8709
8710/**
8711 * Fetches a data dword.
8712 *
8713 * @returns Strict VBox status code.
8714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8715 * @param pu32Dst Where to return the dword.
8716 * @param iSegReg The index of the segment register to use for
8717 * this access. The base and limits are checked.
8718 * @param GCPtrMem The address of the guest memory.
8719 */
8720IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8721{
8722 /* The lazy approach for now... */
8723 uint32_t const *pu32Src;
8724 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8725 if (rc == VINF_SUCCESS)
8726 {
8727 *pu32Dst = *pu32Src;
8728 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8729 }
8730 return rc;
8731}
8732
8733
8734#ifdef IEM_WITH_SETJMP
8735
8736IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8737{
8738 Assert(cbMem >= 1);
8739 Assert(iSegReg < X86_SREG_COUNT);
8740
8741 /*
8742 * 64-bit mode is simpler.
8743 */
8744 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8745 {
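      /* Only FS and GS have a segment base that applies in 64-bit mode; the other segment registers are treated as flat. */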
8746 if (iSegReg >= X86_SREG_FS)
8747 {
8748 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8749 GCPtrMem += pSel->u64Base;
8750 }
8751
8752 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8753 return GCPtrMem;
8754 }
8755 /*
8756 * 16-bit and 32-bit segmentation.
8757 */
8758 else
8759 {
8760 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8761 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8762 == X86DESCATTR_P /* data, expand up */
8763 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8764 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8765 {
8766 /* expand up */
 8767          uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
 8768          if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
 8769                        && GCPtrLast32 >= (uint32_t)GCPtrMem))
8770 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8771 }
8772 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8773 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8774 {
8775 /* expand down */
8776 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8777 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8778 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8779 && GCPtrLast32 > (uint32_t)GCPtrMem))
8780 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8781 }
8782 else
8783 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8784 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8785 }
8786 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8787}
8788
8789
8790IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8791{
8792 Assert(cbMem >= 1);
8793 Assert(iSegReg < X86_SREG_COUNT);
8794
8795 /*
8796 * 64-bit mode is simpler.
8797 */
8798 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8799 {
8800 if (iSegReg >= X86_SREG_FS)
8801 {
8802 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8803 GCPtrMem += pSel->u64Base;
8804 }
8805
8806 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8807 return GCPtrMem;
8808 }
8809 /*
8810 * 16-bit and 32-bit segmentation.
8811 */
8812 else
8813 {
8814 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8815 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8816 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8817 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8818 {
8819 /* expand up */
 8820          uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
 8821          if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
 8822                        && GCPtrLast32 >= (uint32_t)GCPtrMem))
8823 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8824 }
 8825      else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8826 {
8827 /* expand down */
8828 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8829 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8830 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8831 && GCPtrLast32 > (uint32_t)GCPtrMem))
8832 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8833 }
8834 else
8835 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8836 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8837 }
8838 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8839}
8840
8841
8842/**
8843 * Fetches a data dword, longjmp on error, fallback/safe version.
8844 *
8845 * @returns The dword
8846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8847 * @param iSegReg The index of the segment register to use for
8848 * this access. The base and limits are checked.
8849 * @param GCPtrMem The address of the guest memory.
8850 */
8851IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8852{
8853 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8854 uint32_t const u32Ret = *pu32Src;
8855 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8856 return u32Ret;
8857}
8858
8859
8860/**
8861 * Fetches a data dword, longjmp on error.
8862 *
8863 * @returns The dword
8864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8865 * @param iSegReg The index of the segment register to use for
8866 * this access. The base and limits are checked.
8867 * @param GCPtrMem The address of the guest memory.
8868 */
8869DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8870{
8871# ifdef IEM_WITH_DATA_TLB
8872 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8873 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8874 {
8875 /// @todo more later.
8876 }
8877
8878 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8879# else
8880 /* The lazy approach. */
8881 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8882 uint32_t const u32Ret = *pu32Src;
8883 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8884 return u32Ret;
8885# endif
8886}
8887#endif
8888
8889
8890#ifdef SOME_UNUSED_FUNCTION
8891/**
8892 * Fetches a data dword and sign extends it to a qword.
8893 *
8894 * @returns Strict VBox status code.
8895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8896 * @param pu64Dst Where to return the sign extended value.
8897 * @param iSegReg The index of the segment register to use for
8898 * this access. The base and limits are checked.
8899 * @param GCPtrMem The address of the guest memory.
8900 */
8901IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8902{
8903 /* The lazy approach for now... */
8904 int32_t const *pi32Src;
8905 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8906 if (rc == VINF_SUCCESS)
8907 {
8908 *pu64Dst = *pi32Src;
8909 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8910 }
8911#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8912 else
8913 *pu64Dst = 0;
8914#endif
8915 return rc;
8916}
8917#endif
8918
8919
8920/**
8921 * Fetches a data qword.
8922 *
8923 * @returns Strict VBox status code.
8924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8925 * @param pu64Dst Where to return the qword.
8926 * @param iSegReg The index of the segment register to use for
8927 * this access. The base and limits are checked.
8928 * @param GCPtrMem The address of the guest memory.
8929 */
8930IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8931{
8932 /* The lazy approach for now... */
8933 uint64_t const *pu64Src;
8934 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8935 if (rc == VINF_SUCCESS)
8936 {
8937 *pu64Dst = *pu64Src;
8938 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8939 }
8940 return rc;
8941}
8942
8943
8944#ifdef IEM_WITH_SETJMP
8945/**
8946 * Fetches a data qword, longjmp on error.
8947 *
8948 * @returns The qword.
8949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8950 * @param iSegReg The index of the segment register to use for
8951 * this access. The base and limits are checked.
8952 * @param GCPtrMem The address of the guest memory.
8953 */
8954DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8955{
8956 /* The lazy approach for now... */
8957 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8958 uint64_t const u64Ret = *pu64Src;
8959 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8960 return u64Ret;
8961}
8962#endif
8963
8964
8965/**
 8966 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8967 *
8968 * @returns Strict VBox status code.
8969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8970 * @param pu64Dst Where to return the qword.
8971 * @param iSegReg The index of the segment register to use for
8972 * this access. The base and limits are checked.
8973 * @param GCPtrMem The address of the guest memory.
8974 */
8975IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8976{
8977 /* The lazy approach for now... */
8978 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8979 if (RT_UNLIKELY(GCPtrMem & 15))
8980 return iemRaiseGeneralProtectionFault0(pVCpu);
8981
8982 uint64_t const *pu64Src;
8983 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8984 if (rc == VINF_SUCCESS)
8985 {
8986 *pu64Dst = *pu64Src;
8987 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8988 }
8989 return rc;
8990}
8991
8992
8993#ifdef IEM_WITH_SETJMP
8994/**
 8995 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
8996 *
8997 * @returns The qword.
8998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8999 * @param iSegReg The index of the segment register to use for
9000 * this access. The base and limits are checked.
9001 * @param GCPtrMem The address of the guest memory.
9002 */
9003DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9004{
9005 /* The lazy approach for now... */
9006 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9007 if (RT_LIKELY(!(GCPtrMem & 15)))
9008 {
9009 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9010 uint64_t const u64Ret = *pu64Src;
9011 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9012 return u64Ret;
9013 }
9014
9015 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9016 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9017}
9018#endif
9019
9020
9021/**
9022 * Fetches a data tword.
9023 *
9024 * @returns Strict VBox status code.
9025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9026 * @param pr80Dst Where to return the tword.
9027 * @param iSegReg The index of the segment register to use for
9028 * this access. The base and limits are checked.
9029 * @param GCPtrMem The address of the guest memory.
9030 */
9031IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9032{
9033 /* The lazy approach for now... */
9034 PCRTFLOAT80U pr80Src;
9035 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9036 if (rc == VINF_SUCCESS)
9037 {
9038 *pr80Dst = *pr80Src;
9039 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9040 }
9041 return rc;
9042}
9043
9044
9045#ifdef IEM_WITH_SETJMP
9046/**
9047 * Fetches a data tword, longjmp on error.
9048 *
9049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9050 * @param pr80Dst Where to return the tword.
9051 * @param iSegReg The index of the segment register to use for
9052 * this access. The base and limits are checked.
9053 * @param GCPtrMem The address of the guest memory.
9054 */
9055DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9056{
9057 /* The lazy approach for now... */
9058 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9059 *pr80Dst = *pr80Src;
9060 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9061}
9062#endif
9063
9064
9065/**
9066 * Fetches a data dqword (double qword), generally SSE related.
9067 *
9068 * @returns Strict VBox status code.
9069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9070 * @param pu128Dst Where to return the dqword.
9071 * @param iSegReg The index of the segment register to use for
9072 * this access. The base and limits are checked.
9073 * @param GCPtrMem The address of the guest memory.
9074 */
9075IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9076{
9077 /* The lazy approach for now... */
9078 uint128_t const *pu128Src;
9079 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9080 if (rc == VINF_SUCCESS)
9081 {
9082 *pu128Dst = *pu128Src;
9083 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9084 }
9085 return rc;
9086}
9087
9088
9089#ifdef IEM_WITH_SETJMP
9090/**
 9091 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9092 *
9093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9094 * @param pu128Dst Where to return the dqword.
9095 * @param iSegReg The index of the segment register to use for
9096 * this access. The base and limits are checked.
9097 * @param GCPtrMem The address of the guest memory.
9098 */
9099IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9100{
9101 /* The lazy approach for now... */
9102 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9103 *pu128Dst = *pu128Src;
9104 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9105}
9106#endif
9107
9108
9109/**
9110 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9111 * related.
9112 *
9113 * Raises \#GP(0) if not aligned.
9114 *
9115 * @returns Strict VBox status code.
9116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9117 * @param pu128Dst Where to return the dqword.
9118 * @param iSegReg The index of the segment register to use for
9119 * this access. The base and limits are checked.
9120 * @param GCPtrMem The address of the guest memory.
9121 */
9122IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9123{
9124 /* The lazy approach for now... */
9125 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
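   /* Misaligned accesses take a #GP(0) unless AMD's misaligned SSE mode (MXCSR.MM) is enabled. */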
9126 if ( (GCPtrMem & 15)
9127 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9128 return iemRaiseGeneralProtectionFault0(pVCpu);
9129
9130 uint128_t const *pu128Src;
9131 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9132 if (rc == VINF_SUCCESS)
9133 {
9134 *pu128Dst = *pu128Src;
9135 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9136 }
9137 return rc;
9138}
9139
9140
9141#ifdef IEM_WITH_SETJMP
9142/**
9143 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9144 * related, longjmp on error.
9145 *
9146 * Raises \#GP(0) if not aligned.
9147 *
9148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9149 * @param pu128Dst Where to return the dqword.
9150 * @param iSegReg The index of the segment register to use for
9151 * this access. The base and limits are checked.
9152 * @param GCPtrMem The address of the guest memory.
9153 */
9154DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9155{
9156 /* The lazy approach for now... */
9157 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9158 if ( (GCPtrMem & 15) == 0
9159 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9160 {
9161 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9162 IEM_ACCESS_DATA_R);
9163 *pu128Dst = *pu128Src;
9164 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9165 return;
9166 }
9167
9168 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9169 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9170}
9171#endif
9172
9173
9174
9175/**
9176 * Fetches a descriptor register (lgdt, lidt).
9177 *
9178 * @returns Strict VBox status code.
9179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9180 * @param pcbLimit Where to return the limit.
9181 * @param pGCPtrBase Where to return the base.
9182 * @param iSegReg The index of the segment register to use for
9183 * this access. The base and limits are checked.
9184 * @param GCPtrMem The address of the guest memory.
9185 * @param enmOpSize The effective operand size.
9186 */
9187IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9188 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9189{
9190 /*
9191 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9192 * little special:
9193 * - The two reads are done separately.
 9194 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9195 * - We suspect the 386 to actually commit the limit before the base in
9196 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9197 * don't try emulate this eccentric behavior, because it's not well
9198 * enough understood and rather hard to trigger.
9199 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9200 */
9201 VBOXSTRICTRC rcStrict;
9202 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9203 {
9204 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9205 if (rcStrict == VINF_SUCCESS)
9206 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9207 }
9208 else
9209 {
 9210      uint32_t uTmp = 0; /* (initialized to silence a Visual C++ may-be-used-uninitialized warning) */
9211 if (enmOpSize == IEMMODE_32BIT)
9212 {
9213 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9214 {
9215 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9216 if (rcStrict == VINF_SUCCESS)
9217 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9218 }
9219 else
9220 {
9221 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9222 if (rcStrict == VINF_SUCCESS)
9223 {
9224 *pcbLimit = (uint16_t)uTmp;
9225 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9226 }
9227 }
9228 if (rcStrict == VINF_SUCCESS)
9229 *pGCPtrBase = uTmp;
9230 }
9231 else
9232 {
9233 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9234 if (rcStrict == VINF_SUCCESS)
9235 {
9236 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9237 if (rcStrict == VINF_SUCCESS)
9238 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9239 }
9240 }
9241 }
9242 return rcStrict;
9243}
9244
9245
9246
9247/**
9248 * Stores a data byte.
9249 *
9250 * @returns Strict VBox status code.
9251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9252 * @param iSegReg The index of the segment register to use for
9253 * this access. The base and limits are checked.
9254 * @param GCPtrMem The address of the guest memory.
9255 * @param u8Value The value to store.
9256 */
9257IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9258{
9259 /* The lazy approach for now... */
9260 uint8_t *pu8Dst;
9261 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9262 if (rc == VINF_SUCCESS)
9263 {
9264 *pu8Dst = u8Value;
9265 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9266 }
9267 return rc;
9268}
9269
9270
9271#ifdef IEM_WITH_SETJMP
9272/**
9273 * Stores a data byte, longjmp on error.
9274 *
9275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9276 * @param iSegReg The index of the segment register to use for
9277 * this access. The base and limits are checked.
9278 * @param GCPtrMem The address of the guest memory.
9279 * @param u8Value The value to store.
9280 */
9281IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9282{
9283 /* The lazy approach for now... */
9284 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9285 *pu8Dst = u8Value;
9286 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9287}
9288#endif
9289
9290
9291/**
9292 * Stores a data word.
9293 *
9294 * @returns Strict VBox status code.
9295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9296 * @param iSegReg The index of the segment register to use for
9297 * this access. The base and limits are checked.
9298 * @param GCPtrMem The address of the guest memory.
9299 * @param u16Value The value to store.
9300 */
9301IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9302{
9303 /* The lazy approach for now... */
9304 uint16_t *pu16Dst;
9305 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9306 if (rc == VINF_SUCCESS)
9307 {
9308 *pu16Dst = u16Value;
9309 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9310 }
9311 return rc;
9312}
9313
9314
9315#ifdef IEM_WITH_SETJMP
9316/**
9317 * Stores a data word, longjmp on error.
9318 *
9319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9320 * @param iSegReg The index of the segment register to use for
9321 * this access. The base and limits are checked.
9322 * @param GCPtrMem The address of the guest memory.
9323 * @param u16Value The value to store.
9324 */
9325IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9326{
9327 /* The lazy approach for now... */
9328 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9329 *pu16Dst = u16Value;
9330 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9331}
9332#endif
9333
9334
9335/**
9336 * Stores a data dword.
9337 *
9338 * @returns Strict VBox status code.
9339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9340 * @param iSegReg The index of the segment register to use for
9341 * this access. The base and limits are checked.
9342 * @param GCPtrMem The address of the guest memory.
9343 * @param u32Value The value to store.
9344 */
9345IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9346{
9347 /* The lazy approach for now... */
9348 uint32_t *pu32Dst;
9349 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9350 if (rc == VINF_SUCCESS)
9351 {
9352 *pu32Dst = u32Value;
9353 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9354 }
9355 return rc;
9356}
9357
9358
9359#ifdef IEM_WITH_SETJMP
9360/**
 9361 * Stores a data dword, longjmp on error.
 9362 *
9364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9365 * @param iSegReg The index of the segment register to use for
9366 * this access. The base and limits are checked.
9367 * @param GCPtrMem The address of the guest memory.
9368 * @param u32Value The value to store.
9369 */
9370IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9371{
9372 /* The lazy approach for now... */
9373 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9374 *pu32Dst = u32Value;
9375 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9376}
9377#endif
9378
9379
9380/**
9381 * Stores a data qword.
9382 *
9383 * @returns Strict VBox status code.
9384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9385 * @param iSegReg The index of the segment register to use for
9386 * this access. The base and limits are checked.
9387 * @param GCPtrMem The address of the guest memory.
9388 * @param u64Value The value to store.
9389 */
9390IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9391{
9392 /* The lazy approach for now... */
9393 uint64_t *pu64Dst;
9394 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9395 if (rc == VINF_SUCCESS)
9396 {
9397 *pu64Dst = u64Value;
9398 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9399 }
9400 return rc;
9401}
9402
9403
9404#ifdef IEM_WITH_SETJMP
9405/**
9406 * Stores a data qword, longjmp on error.
9407 *
9408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9409 * @param iSegReg The index of the segment register to use for
9410 * this access. The base and limits are checked.
9411 * @param GCPtrMem The address of the guest memory.
9412 * @param u64Value The value to store.
9413 */
9414IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9415{
9416 /* The lazy approach for now... */
9417 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9418 *pu64Dst = u64Value;
9419 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9420}
9421#endif
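/*
 * Rough usage sketch: with IEM_WITH_SETJMP defined, the *Jmp store helpers
 * report failure by longjmp'ing via pVCpu->iem.s.CTX_SUFF(pJmpBuf) instead of
 * returning a status code, so callers look roughly like this (u64Value,
 * iSegReg and GCPtrMem are assumed caller locals):
 */
#if 0
# ifdef IEM_WITH_SETJMP
    iemMemStoreDataU64Jmp(pVCpu, iSegReg, GCPtrMem, u64Value);      /* no status code to check */
# else
    VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem, u64Value);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
# endif
#endif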
9422
9423
9424/**
9425 * Stores a data dqword.
9426 *
9427 * @returns Strict VBox status code.
9428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9429 * @param iSegReg The index of the segment register to use for
9430 * this access. The base and limits are checked.
9431 * @param GCPtrMem The address of the guest memory.
9432 * @param u128Value The value to store.
9433 */
9434IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9435{
9436 /* The lazy approach for now... */
9437 uint128_t *pu128Dst;
9438 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9439 if (rc == VINF_SUCCESS)
9440 {
9441 *pu128Dst = u128Value;
9442 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9443 }
9444 return rc;
9445}
9446
9447
9448#ifdef IEM_WITH_SETJMP
9449/**
9450 * Stores a data dqword, longjmp on error.
9451 *
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param iSegReg The index of the segment register to use for
9454 * this access. The base and limits are checked.
9455 * @param GCPtrMem The address of the guest memory.
9456 * @param u128Value The value to store.
9457 */
9458IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9459{
9460 /* The lazy approach for now... */
9461 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9462 *pu128Dst = u128Value;
9463 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9464}
9465#endif
9466
9467
9468/**
9469 * Stores a data dqword, SSE aligned.
9470 *
9471 * @returns Strict VBox status code.
9472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9473 * @param iSegReg The index of the segment register to use for
9474 * this access. The base and limits are checked.
9475 * @param GCPtrMem The address of the guest memory.
9476 * @param u128Value The value to store.
9477 */
9478IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9479{
9480 /* The lazy approach for now... */
9481 if ( (GCPtrMem & 15)
9482 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9483 return iemRaiseGeneralProtectionFault0(pVCpu);
9484
9485 uint128_t *pu128Dst;
9486 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9487 if (rc == VINF_SUCCESS)
9488 {
9489 *pu128Dst = u128Value;
9490 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9491 }
9492 return rc;
9493}
9494
9495
9496#ifdef IEM_WITH_SETJMP
9497/**
9498 * Stores a data dqword, SSE aligned; longjmp on error.
9499 *
9501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9502 * @param iSegReg The index of the segment register to use for
9503 * this access. The base and limits are checked.
9504 * @param GCPtrMem The address of the guest memory.
9505 * @param u128Value The value to store.
9506 */
9507DECL_NO_INLINE(IEM_STATIC, void)
9508iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9509{
9510 /* The lazy approach for now... */
9511 if ( (GCPtrMem & 15) == 0
9512 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9513 {
9514 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9515 *pu128Dst = u128Value;
9516 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9517 return;
9518 }
9519
9520 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9521 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9522}
9523#endif
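/*
 * Illustrative note, not a definitive statement of hardware behaviour: the
 * aligned SSE store only faults when the effective address is misaligned AND
 * MXCSR.MM is clear (see the check at the top of the functions above, which
 * is applied before the segment base, per the todo).  A minimal sketch, with
 * rcStrict and u128Value as assumed caller locals:
 */
#if 0
    /* 16-byte aligned address: no alignment fault, regardless of MXCSR.MM. */
    rcStrict = iemMemStoreDataU128AlignedSse(pVCpu, X86_SREG_DS, 0x1000, u128Value);

    /* Misaligned address: raises #GP(0) unless the guest enabled MXCSR.MM. */
    rcStrict = iemMemStoreDataU128AlignedSse(pVCpu, X86_SREG_DS, 0x1001, u128Value);
#endif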
9524
9525
9526/**
9527 * Stores a descriptor register (sgdt, sidt).
9528 *
9529 * @returns Strict VBox status code.
9530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9531 * @param cbLimit The limit.
9532 * @param GCPtrBase The base address.
9533 * @param iSegReg The index of the segment register to use for
9534 * this access. The base and limits are checked.
9535 * @param GCPtrMem The address of the guest memory.
9536 */
9537IEM_STATIC VBOXSTRICTRC
9538iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9539{
9540 /*
9541 * The SIDT and SGDT instructions actually store the data using two
9542 * independent writes. The instructions do not respond to operand-size prefixes.
9543 */
9544 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9545 if (rcStrict == VINF_SUCCESS)
9546 {
9547 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9548 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9549 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9550 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9551 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9552 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9553 else
9554 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9555 }
9556 return rcStrict;
9557}
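/*
 * Rough sketch of what this amounts to for a 32-bit guest executing
 * "sgdt [mem]"; iSegReg/GCPtrMem come from the decoded operand and pCtx is
 * assumed to be IEM_GET_CTX(pVCpu).  Illustration only:
 */
#if 0
    VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, pCtx->gdtr.cbGdt);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)pCtx->gdtr.pGdt);
#endif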
9558
9559
9560/**
9561 * Pushes a word onto the stack.
9562 *
9563 * @returns Strict VBox status code.
9564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9565 * @param u16Value The value to push.
9566 */
9567IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9568{
9569 /* Decrement the stack pointer. */
9570 uint64_t uNewRsp;
9571 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9572 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9573
9574 /* Write the word the lazy way. */
9575 uint16_t *pu16Dst;
9576 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9577 if (rc == VINF_SUCCESS)
9578 {
9579 *pu16Dst = u16Value;
9580 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9581 }
9582
9583 /* Commit the new RSP value unless an access handler made trouble. */
9584 if (rc == VINF_SUCCESS)
9585 pCtx->rsp = uNewRsp;
9586
9587 return rc;
9588}
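/*
 * Rough usage sketch: an instruction implementation that pushes a word simply
 * forwards the value and propagates the strict status code; RSP is only
 * updated by the helper on success.  u16Imm is an assumed caller local:
 */
#if 0
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Imm);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
#endif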
9589
9590
9591/**
9592 * Pushes a dword onto the stack.
9593 *
9594 * @returns Strict VBox status code.
9595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9596 * @param u32Value The value to push.
9597 */
9598IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9599{
9600 /* Decrement the stack pointer. */
9601 uint64_t uNewRsp;
9602 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9603 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9604
9605 /* Write the dword the lazy way. */
9606 uint32_t *pu32Dst;
9607 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9608 if (rc == VINF_SUCCESS)
9609 {
9610 *pu32Dst = u32Value;
9611 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9612 }
9613
9614 /* Commit the new RSP value unless an access handler made trouble. */
9615 if (rc == VINF_SUCCESS)
9616 pCtx->rsp = uNewRsp;
9617
9618 return rc;
9619}
9620
9621
9622/**
9623 * Pushes a dword segment register value onto the stack.
9624 *
9625 * @returns Strict VBox status code.
9626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9627 * @param u32Value The value to push.
9628 */
9629IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9630{
9631 /* Decrement the stack pointer. */
9632 uint64_t uNewRsp;
9633 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9634 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9635
9636 VBOXSTRICTRC rc;
9637 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9638 {
9639 /* The recompiler writes a full dword. */
9640 uint32_t *pu32Dst;
9641 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9642 if (rc == VINF_SUCCESS)
9643 {
9644 *pu32Dst = u32Value;
9645 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9646 }
9647 }
9648 else
9649 {
9650 /* The Intel docs talk about zero extending the selector register
9651 value. My actual Intel CPU here might be zero extending the value,
9652 but it still only writes the lower word... */
9653 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9654 * happens when crossing an electric page boundary: is the high word checked
9655 * for write accessibility or not? Probably it is. What about segment limits?
9656 * It appears this behavior is also shared with trap error codes.
9657 *
9658 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
9659 * ancient hardware when it actually did change. */
9660 uint16_t *pu16Dst;
9661 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9662 if (rc == VINF_SUCCESS)
9663 {
9664 *pu16Dst = (uint16_t)u32Value;
9665 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9666 }
9667 }
9668
9669 /* Commit the new RSP value unless an access handler made trouble. */
9670 if (rc == VINF_SUCCESS)
9671 pCtx->rsp = uNewRsp;
9672
9673 return rc;
9674}
9675
9676
9677/**
9678 * Pushes a qword onto the stack.
9679 *
9680 * @returns Strict VBox status code.
9681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9682 * @param u64Value The value to push.
9683 */
9684IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9685{
9686 /* Decrement the stack pointer. */
9687 uint64_t uNewRsp;
9688 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9689 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9690
9691 /* Write the qword the lazy way. */
9692 uint64_t *pu64Dst;
9693 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9694 if (rc == VINF_SUCCESS)
9695 {
9696 *pu64Dst = u64Value;
9697 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9698 }
9699
9700 /* Commit the new RSP value unless an access handler made trouble. */
9701 if (rc == VINF_SUCCESS)
9702 pCtx->rsp = uNewRsp;
9703
9704 return rc;
9705}
9706
9707
9708/**
9709 * Pops a word from the stack.
9710 *
9711 * @returns Strict VBox status code.
9712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9713 * @param pu16Value Where to store the popped value.
9714 */
9715IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9716{
9717 /* Increment the stack pointer. */
9718 uint64_t uNewRsp;
9719 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9720 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9721
9722 /* Fetch the word the lazy way. */
9723 uint16_t const *pu16Src;
9724 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9725 if (rc == VINF_SUCCESS)
9726 {
9727 *pu16Value = *pu16Src;
9728 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9729
9730 /* Commit the new RSP value. */
9731 if (rc == VINF_SUCCESS)
9732 pCtx->rsp = uNewRsp;
9733 }
9734
9735 return rc;
9736}
9737
9738
9739/**
9740 * Pops a dword from the stack.
9741 *
9742 * @returns Strict VBox status code.
9743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9744 * @param pu32Value Where to store the popped value.
9745 */
9746IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9747{
9748 /* Increment the stack pointer. */
9749 uint64_t uNewRsp;
9750 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9751 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9752
9753 /* Fetch the dword the lazy way. */
9754 uint32_t const *pu32Src;
9755 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9756 if (rc == VINF_SUCCESS)
9757 {
9758 *pu32Value = *pu32Src;
9759 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9760
9761 /* Commit the new RSP value. */
9762 if (rc == VINF_SUCCESS)
9763 pCtx->rsp = uNewRsp;
9764 }
9765
9766 return rc;
9767}
9768
9769
9770/**
9771 * Pops a qword from the stack.
9772 *
9773 * @returns Strict VBox status code.
9774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9775 * @param pu64Value Where to store the popped value.
9776 */
9777IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9778{
9779 /* Increment the stack pointer. */
9780 uint64_t uNewRsp;
9781 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9782 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9783
9784 /* Fetch the qword the lazy way. */
9785 uint64_t const *pu64Src;
9786 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9787 if (rc == VINF_SUCCESS)
9788 {
9789 *pu64Value = *pu64Src;
9790 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9791
9792 /* Commit the new RSP value. */
9793 if (rc == VINF_SUCCESS)
9794 pCtx->rsp = uNewRsp;
9795 }
9796
9797 return rc;
9798}
9799
9800
9801/**
9802 * Pushes a word onto the stack, using a temporary stack pointer.
9803 *
9804 * @returns Strict VBox status code.
9805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9806 * @param u16Value The value to push.
9807 * @param pTmpRsp Pointer to the temporary stack pointer.
9808 */
9809IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9810{
9811 /* Decrement the stack pointer. */
9812 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9813 RTUINT64U NewRsp = *pTmpRsp;
9814 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9815
9816 /* Write the word the lazy way. */
9817 uint16_t *pu16Dst;
9818 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9819 if (rc == VINF_SUCCESS)
9820 {
9821 *pu16Dst = u16Value;
9822 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9823 }
9824
9825 /* Commit the new RSP value unless an access handler made trouble. */
9826 if (rc == VINF_SUCCESS)
9827 *pTmpRsp = NewRsp;
9828
9829 return rc;
9830}
9831
9832
9833/**
9834 * Pushes a dword onto the stack, using a temporary stack pointer.
9835 *
9836 * @returns Strict VBox status code.
9837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9838 * @param u32Value The value to push.
9839 * @param pTmpRsp Pointer to the temporary stack pointer.
9840 */
9841IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9842{
9843 /* Decrement the stack pointer. */
9844 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9845 RTUINT64U NewRsp = *pTmpRsp;
9846 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9847
9848 /* Write the dword the lazy way. */
9849 uint32_t *pu32Dst;
9850 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9851 if (rc == VINF_SUCCESS)
9852 {
9853 *pu32Dst = u32Value;
9854 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9855 }
9856
9857 /* Commit the new RSP value unless an access handler made trouble. */
9858 if (rc == VINF_SUCCESS)
9859 *pTmpRsp = NewRsp;
9860
9861 return rc;
9862}
9863
9864
9865/**
9866 * Pushes a qword onto the stack, using a temporary stack pointer.
9867 *
9868 * @returns Strict VBox status code.
9869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9870 * @param u64Value The value to push.
9871 * @param pTmpRsp Pointer to the temporary stack pointer.
9872 */
9873IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9874{
9875 /* Decrement the stack pointer. */
9876 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9877 RTUINT64U NewRsp = *pTmpRsp;
9878 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9879
9880 /* Write the qword the lazy way. */
9881 uint64_t *pu64Dst;
9882 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9883 if (rc == VINF_SUCCESS)
9884 {
9885 *pu64Dst = u64Value;
9886 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9887 }
9888
9889 /* Commit the new RSP value unless an access handler made trouble. */
9890 if (rc == VINF_SUCCESS)
9891 *pTmpRsp = NewRsp;
9892
9893 return rc;
9894}
9895
9896
9897/**
9898 * Pops a word from the stack, using a temporary stack pointer.
9899 *
9900 * @returns Strict VBox status code.
9901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9902 * @param pu16Value Where to store the popped value.
9903 * @param pTmpRsp Pointer to the temporary stack pointer.
9904 */
9905IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9906{
9907 /* Increment the stack pointer. */
9908 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9909 RTUINT64U NewRsp = *pTmpRsp;
9910 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9911
9912 /* Fetch the word the lazy way. */
9913 uint16_t const *pu16Src;
9914 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9915 if (rc == VINF_SUCCESS)
9916 {
9917 *pu16Value = *pu16Src;
9918 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9919
9920 /* Commit the new RSP value. */
9921 if (rc == VINF_SUCCESS)
9922 *pTmpRsp = NewRsp;
9923 }
9924
9925 return rc;
9926}
9927
9928
9929/**
9930 * Pops a dword from the stack, using a temporary stack pointer.
9931 *
9932 * @returns Strict VBox status code.
9933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9934 * @param pu32Value Where to store the popped value.
9935 * @param pTmpRsp Pointer to the temporary stack pointer.
9936 */
9937IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9938{
9939 /* Increment the stack pointer. */
9940 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9941 RTUINT64U NewRsp = *pTmpRsp;
9942 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9943
9944 /* Fetch the dword the lazy way. */
9945 uint32_t const *pu32Src;
9946 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9947 if (rc == VINF_SUCCESS)
9948 {
9949 *pu32Value = *pu32Src;
9950 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9951
9952 /* Commit the new RSP value. */
9953 if (rc == VINF_SUCCESS)
9954 *pTmpRsp = NewRsp;
9955 }
9956
9957 return rc;
9958}
9959
9960
9961/**
9962 * Pops a qword from the stack, using a temporary stack pointer.
9963 *
9964 * @returns Strict VBox status code.
9965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9966 * @param pu64Value Where to store the popped value.
9967 * @param pTmpRsp Pointer to the temporary stack pointer.
9968 */
9969IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9970{
9971 /* Increment the stack pointer. */
9972 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9973 RTUINT64U NewRsp = *pTmpRsp;
9974 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9975
9976 /* Fetch the qword the lazy way. */
9977 uint64_t const *pu64Src;
9978 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9979 if (rcStrict == VINF_SUCCESS)
9980 {
9981 *pu64Value = *pu64Src;
9982 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9983
9984 /* Commit the new RSP value. */
9985 if (rcStrict == VINF_SUCCESS)
9986 *pTmpRsp = NewRsp;
9987 }
9988
9989 return rcStrict;
9990}
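/*
 * Rough sketch of the intended use of the *Ex variants: the caller works on
 * its own RTUINT64U copy of RSP, so a multi-pop instruction can back out on a
 * fault and only commit RSP once at the end.  pCtx is assumed to be
 * IEM_GET_CTX(pVCpu); the two-pop shape is illustrative only:
 */
#if 0
    RTUINT64U    TmpRsp;
    TmpRsp.u = pCtx->rsp;
    uint64_t     u64Val1, u64Val2;
    VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Val1, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Val2, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u;   /* commit only when every pop succeeded */
#endif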
9991
9992
9993/**
9994 * Begin a special stack push (used by interrupts, exceptions and such).
9995 *
9996 * This will raise \#SS or \#PF if appropriate.
9997 *
9998 * @returns Strict VBox status code.
9999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10000 * @param cbMem The number of bytes to push onto the stack.
10001 * @param ppvMem Where to return the pointer to the stack memory.
10002 * As with the other memory functions this could be
10003 * direct access or bounce buffered access, so
10004 * don't commit the register until the commit call
10005 * succeeds.
10006 * @param puNewRsp Where to return the new RSP value. This must be
10007 * passed unchanged to
10008 * iemMemStackPushCommitSpecial().
10009 */
10010IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10011{
10012 Assert(cbMem < UINT8_MAX);
10013 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10014 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10015 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10016}
10017
10018
10019/**
10020 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10021 *
10022 * This will update the rSP.
10023 *
10024 * @returns Strict VBox status code.
10025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10026 * @param pvMem The pointer returned by
10027 * iemMemStackPushBeginSpecial().
10028 * @param uNewRsp The new RSP value returned by
10029 * iemMemStackPushBeginSpecial().
10030 */
10031IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10032{
10033 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10034 if (rcStrict == VINF_SUCCESS)
10035 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10036 return rcStrict;
10037}
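/*
 * Rough sketch of the Begin/Commit protocol: the whole stack frame is mapped
 * first, filled in, and only then committed together with the new RSP.  The
 * three-dword frame and the uEip/uCs/uEfl locals below are assumptions made
 * for illustration:
 */
#if 0
    uint64_t     uNewRsp;
    uint32_t    *pau32Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
                                                        (void **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau32Frame[0] = uEip;   /* lowest address = last value pushed */
    pau32Frame[1] = uCs;
    pau32Frame[2] = uEfl;
    rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
#endif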
10038
10039
10040/**
10041 * Begin a special stack pop (used by iret, retf and such).
10042 *
10043 * This will raise \#SS or \#PF if appropriate.
10044 *
10045 * @returns Strict VBox status code.
10046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10047 * @param cbMem The number of bytes to pop from the stack.
10048 * @param ppvMem Where to return the pointer to the stack memory.
10049 * @param puNewRsp Where to return the new RSP value. This must be
10050 * assigned to CPUMCTX::rsp manually some time
10051 * after iemMemStackPopDoneSpecial() has been
10052 * called.
10053 */
10054IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10055{
10056 Assert(cbMem < UINT8_MAX);
10057 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10058 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10059 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10060}
10061
10062
10063/**
10064 * Continue a special stack pop (used by iret and retf).
10065 *
10066 * This will raise \#SS or \#PF if appropriate.
10067 *
10068 * @returns Strict VBox status code.
10069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10070 * @param cbMem The number of bytes to pop from the stack.
10071 * @param ppvMem Where to return the pointer to the stack memory.
10072 * @param puNewRsp Where to return the new RSP value. This must be
10073 * assigned to CPUMCTX::rsp manually some time
10074 * after iemMemStackPopDoneSpecial() has been
10075 * called.
10076 */
10077IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10078{
10079 Assert(cbMem < UINT8_MAX);
10080 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10081 RTUINT64U NewRsp;
10082 NewRsp.u = *puNewRsp;
10083 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10084 *puNewRsp = NewRsp.u;
10085 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10086}
10087
10088
10089/**
10090 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10091 * iemMemStackPopContinueSpecial).
10092 *
10093 * The caller will manually commit the rSP.
10094 *
10095 * @returns Strict VBox status code.
10096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10097 * @param pvMem The pointer returned by
10098 * iemMemStackPopBeginSpecial() or
10099 * iemMemStackPopContinueSpecial().
10100 */
10101IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10102{
10103 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10104}
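/*
 * Rough sketch of the Begin/Done protocol used by iret/retf style code: the
 * frame is mapped and inspected before anything is committed, and RSP is only
 * assigned by the caller afterwards.  The three-dword frame layout and pCtx
 * (assumed IEM_GET_CTX(pVCpu)) are illustrative assumptions:
 */
#if 0
    uint64_t        uNewRsp;
    uint32_t const *pau32Frame;
    VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
                                                          (void const **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t const uNewEip = pau32Frame[0];
    uint32_t const uNewCs  = pau32Frame[1];
    uint32_t const uNewEfl = pau32Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = uNewRsp;    /* the caller commits RSP manually */
#endif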
10105
10106
10107/**
10108 * Fetches a system table byte.
10109 *
10110 * @returns Strict VBox status code.
10111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10112 * @param pbDst Where to return the byte.
10113 * @param iSegReg The index of the segment register to use for
10114 * this access. The base and limits are checked.
10115 * @param GCPtrMem The address of the guest memory.
10116 */
10117IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10118{
10119 /* The lazy approach for now... */
10120 uint8_t const *pbSrc;
10121 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10122 if (rc == VINF_SUCCESS)
10123 {
10124 *pbDst = *pbSrc;
10125 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10126 }
10127 return rc;
10128}
10129
10130
10131/**
10132 * Fetches a system table word.
10133 *
10134 * @returns Strict VBox status code.
10135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10136 * @param pu16Dst Where to return the word.
10137 * @param iSegReg The index of the segment register to use for
10138 * this access. The base and limits are checked.
10139 * @param GCPtrMem The address of the guest memory.
10140 */
10141IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10142{
10143 /* The lazy approach for now... */
10144 uint16_t const *pu16Src;
10145 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10146 if (rc == VINF_SUCCESS)
10147 {
10148 *pu16Dst = *pu16Src;
10149 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10150 }
10151 return rc;
10152}
10153
10154
10155/**
10156 * Fetches a system table dword.
10157 *
10158 * @returns Strict VBox status code.
10159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10160 * @param pu32Dst Where to return the dword.
10161 * @param iSegReg The index of the segment register to use for
10162 * this access. The base and limits are checked.
10163 * @param GCPtrMem The address of the guest memory.
10164 */
10165IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10166{
10167 /* The lazy approach for now... */
10168 uint32_t const *pu32Src;
10169 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10170 if (rc == VINF_SUCCESS)
10171 {
10172 *pu32Dst = *pu32Src;
10173 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10174 }
10175 return rc;
10176}
10177
10178
10179/**
10180 * Fetches a system table qword.
10181 *
10182 * @returns Strict VBox status code.
10183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10184 * @param pu64Dst Where to return the qword.
10185 * @param iSegReg The index of the segment register to use for
10186 * this access. The base and limits are checked.
10187 * @param GCPtrMem The address of the guest memory.
10188 */
10189IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10190{
10191 /* The lazy approach for now... */
10192 uint64_t const *pu64Src;
10193 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10194 if (rc == VINF_SUCCESS)
10195 {
10196 *pu64Dst = *pu64Src;
10197 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10198 }
10199 return rc;
10200}
10201
10202
10203/**
10204 * Fetches a descriptor table entry with caller specified error code.
10205 *
10206 * @returns Strict VBox status code.
10207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10208 * @param pDesc Where to return the descriptor table entry.
10209 * @param uSel The selector which table entry to fetch.
10210 * @param uXcpt The exception to raise on table lookup error.
10211 * @param uErrorCode The error code associated with the exception.
10212 */
10213IEM_STATIC VBOXSTRICTRC
10214iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10215{
10216 AssertPtr(pDesc);
10217 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10218
10219 /** @todo did the 286 require all 8 bytes to be accessible? */
10220 /*
10221 * Get the selector table base and check bounds.
10222 */
10223 RTGCPTR GCPtrBase;
10224 if (uSel & X86_SEL_LDT)
10225 {
10226 if ( !pCtx->ldtr.Attr.n.u1Present
10227 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10228 {
10229 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10230 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10231 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10232 uErrorCode, 0);
10233 }
10234
10235 Assert(pCtx->ldtr.Attr.n.u1Present);
10236 GCPtrBase = pCtx->ldtr.u64Base;
10237 }
10238 else
10239 {
10240 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10241 {
10242 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10243 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10244 uErrorCode, 0);
10245 }
10246 GCPtrBase = pCtx->gdtr.pGdt;
10247 }
10248
10249 /*
10250 * Read the legacy descriptor and maybe the long mode extensions if
10251 * required.
10252 */
10253 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10254 if (rcStrict == VINF_SUCCESS)
10255 {
10256 if ( !IEM_IS_LONG_MODE(pVCpu)
10257 || pDesc->Legacy.Gen.u1DescType)
10258 pDesc->Long.au64[1] = 0;
10259 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10260 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10261 else
10262 {
10263 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10264 /** @todo is this the right exception? */
10265 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10266 }
10267 }
10268 return rcStrict;
10269}
10270
10271
10272/**
10273 * Fetches a descriptor table entry.
10274 *
10275 * @returns Strict VBox status code.
10276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10277 * @param pDesc Where to return the descriptor table entry.
10278 * @param uSel The selector which table entry to fetch.
10279 * @param uXcpt The exception to raise on table lookup error.
10280 */
10281IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10282{
10283 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10284}
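/*
 * Worked example (illustrative, not exhaustive): for a GDT selector such as
 * 0x0013 (index 2, TI=0, RPL=3) the entry is read from
 * gdtr.pGdt + (0x0013 & X86_SEL_MASK) = gdtr.pGdt + 0x10, i.e. the TI and RPL
 * bits are masked off before indexing the table.  A minimal call sketch:
 */
#if 0
    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, 0x0013, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
#endif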
10285
10286
10287/**
10288 * Fakes a long mode stack selector for SS = 0.
10289 *
10290 * @param pDescSs Where to return the fake stack descriptor.
10291 * @param uDpl The DPL we want.
10292 */
10293IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10294{
10295 pDescSs->Long.au64[0] = 0;
10296 pDescSs->Long.au64[1] = 0;
10297 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10298 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10299 pDescSs->Long.Gen.u2Dpl = uDpl;
10300 pDescSs->Long.Gen.u1Present = 1;
10301 pDescSs->Long.Gen.u1Long = 1;
10302}
10303
10304
10305/**
10306 * Marks the selector descriptor as accessed (only non-system descriptors).
10307 *
10308 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10309 * will therefore skip the limit checks.
10310 *
10311 * @returns Strict VBox status code.
10312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10313 * @param uSel The selector.
10314 */
10315IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10316{
10317 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10318
10319 /*
10320 * Get the selector table base and calculate the entry address.
10321 */
10322 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10323 ? pCtx->ldtr.u64Base
10324 : pCtx->gdtr.pGdt;
10325 GCPtr += uSel & X86_SEL_MASK;
10326
10327 /*
10328 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10329 * ugly stuff to avoid this. This will make sure it's an atomic access
10330 * and more or less removes any question about 8-bit or 32-bit accesses.
10331 */
10332 VBOXSTRICTRC rcStrict;
10333 uint32_t volatile *pu32;
10334 if ((GCPtr & 3) == 0)
10335 {
10336 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
10337 GCPtr += 2 + 2;
10338 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10339 if (rcStrict != VINF_SUCCESS)
10340 return rcStrict;
10341 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10342 }
10343 else
10344 {
10345 /* The misaligned GDT/LDT case, map the whole thing. */
10346 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10347 if (rcStrict != VINF_SUCCESS)
10348 return rcStrict;
10349 switch ((uintptr_t)pu32 & 3)
10350 {
10351 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10352 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10353 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10354 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10355 }
10356 }
10357
10358 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10359}
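/*
 * Note on the bit arithmetic above (as I read it): the accessed flag is bit 40
 * of the 8-byte descriptor (bit 0 of the type field in byte 5).  In the
 * aligned case the code maps bytes 4..7, so the flag ends up as bit
 * 40 - 32 = 8 of that dword; the misaligned cases likewise subtract the byte
 * offset times 8 (hence 40 - 24, 40 - 16 and 40 - 8).
 */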
10360
10361/** @} */
10362
10363
10364/*
10365 * Include the C/C++ implementation of instruction.
10366 */
10367#include "IEMAllCImpl.cpp.h"
10368
10369
10370
10371/** @name "Microcode" macros.
10372 *
10373 * The idea is that we should be able to use the same code to interpret
10374 * instructions as well as recompiler instructions. Thus this obfuscation.
10375 *
10376 * @{
10377 */
10378#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10379#define IEM_MC_END() }
10380#define IEM_MC_PAUSE() do {} while (0)
10381#define IEM_MC_CONTINUE() do {} while (0)
10382
10383/** Internal macro. */
10384#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10385 do \
10386 { \
10387 VBOXSTRICTRC rcStrict2 = a_Expr; \
10388 if (rcStrict2 != VINF_SUCCESS) \
10389 return rcStrict2; \
10390 } while (0)
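/*
 * Rough sketch of how an instruction body is written with these microcode
 * macros (a made-up reg,reg word move, for illustration only); the macros it
 * uses are defined further down in this section, and a recompiler could give
 * the very same macros a different expansion:
 */
#if 0
IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
    IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
    IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif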
10391
10392
10393#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10394#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10395#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10396#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10397#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10398#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10399#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10400#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10401#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10402 do { \
10403 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10404 return iemRaiseDeviceNotAvailable(pVCpu); \
10405 } while (0)
10406#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10407 do { \
10408 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10409 return iemRaiseMathFault(pVCpu); \
10410 } while (0)
10411#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10412 do { \
10413 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10414 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10415 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10416 return iemRaiseUndefinedOpcode(pVCpu); \
10417 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10418 return iemRaiseDeviceNotAvailable(pVCpu); \
10419 } while (0)
10420#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10421 do { \
10422 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10423 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10424 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10425 return iemRaiseUndefinedOpcode(pVCpu); \
10426 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10427 return iemRaiseDeviceNotAvailable(pVCpu); \
10428 } while (0)
10429#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10430 do { \
10431 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10432 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10433 return iemRaiseUndefinedOpcode(pVCpu); \
10434 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10435 return iemRaiseDeviceNotAvailable(pVCpu); \
10436 } while (0)
10437#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10438 do { \
10439 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10440 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10441 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10442 return iemRaiseUndefinedOpcode(pVCpu); \
10443 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10444 return iemRaiseDeviceNotAvailable(pVCpu); \
10445 } while (0)
10446#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10447 do { \
10448 if (pVCpu->iem.s.uCpl != 0) \
10449 return iemRaiseGeneralProtectionFault0(pVCpu); \
10450 } while (0)
10451
10452
10453#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10454#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10455#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10456#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10457#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10458#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10459#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10460 uint32_t a_Name; \
10461 uint32_t *a_pName = &a_Name
10462#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10463 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10464
10465#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10466#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10467
10468#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10469#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10470#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10471#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10472#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10473#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10474#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10475#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10476#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10477#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10478#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10479#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10480#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10481#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10482#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10483#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10484#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10485#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10486#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10487#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10488#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10489#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10490#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10491#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10492#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10493#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10494#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10495#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10496#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10497/** @note Not for IOPL or IF testing or modification. */
10498#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10499#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10500#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10501#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10502
10503#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10504#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10505#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10506#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10507#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10508#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10509#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10510#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10511#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10512#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10513#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10514 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10515
10516#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10517#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10518/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10519 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10520#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10521#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10522/** @note Not for IOPL or IF testing or modification. */
10523#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10524
10525#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10526#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10527#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10528 do { \
10529 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10530 *pu32Reg += (a_u32Value); \
10531 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10532 } while (0)
10533#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10534
10535#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10536#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10537#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10538 do { \
10539 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10540 *pu32Reg -= (a_u32Value); \
10541 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10542 } while (0)
10543#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10544#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10545
10546#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10547#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10548#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10549#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10550#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10551#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10552#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10553
10554#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10555#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10556#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10557#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10558
10559#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10560#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10561#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10562
10563#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10564#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10565#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10566
10567#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10568#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10569#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10570
10571#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10572#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10573#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10574
10575#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10576
10577#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10578
10579#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10580#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10581#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10582 do { \
10583 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10584 *pu32Reg &= (a_u32Value); \
10585 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10586 } while (0)
10587#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10588
10589#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10590#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10591#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10592 do { \
10593 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10594 *pu32Reg |= (a_u32Value); \
10595 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10596 } while (0)
10597#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10598
10599
10600/** @note Not for IOPL or IF modification. */
10601#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10602/** @note Not for IOPL or IF modification. */
10603#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10604/** @note Not for IOPL or IF modification. */
10605#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10606
10607#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10608
10609
10610#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10611 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10612#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10613 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10614#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10615 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10616#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10617 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10618#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10619 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10620#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10621 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10622#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10623 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10624
10625#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10626 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10627#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10628 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10629#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10630 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10631#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10632 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10633#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10634 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10635#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10636 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10637 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10638 } while (0)
10639#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10640 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10641 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10642 } while (0)
10643#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10644 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10645#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10646 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10647#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10648 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10649#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10650 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10651 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10652
10653#ifndef IEM_WITH_SETJMP
10654# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10655 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10656# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10657 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10658# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10660#else
10661# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10662 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10663# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10664 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10665# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10666 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10667#endif
10668
10669#ifndef IEM_WITH_SETJMP
10670# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10672# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10673 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10674# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10676#else
10677# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10678 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10679# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10680 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10681# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10682 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10683#endif
10684
10685#ifndef IEM_WITH_SETJMP
10686# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10688# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10690# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10692#else
10693# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10694 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10695# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10696 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10697# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10698 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10699#endif
10700
10701#ifdef SOME_UNUSED_FUNCTION
10702# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10703 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10704#endif
10705
10706#ifndef IEM_WITH_SETJMP
10707# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10709# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10711# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10713# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10715#else
10716# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10717 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10718# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10719 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10720# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10721 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10722# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10723 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10724#endif
10725
10726#ifndef IEM_WITH_SETJMP
10727# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10729# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10730 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10731# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10732 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10733#else
10734# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10735 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10736# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10737 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10738# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10739 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10740#endif
10741
10742#ifndef IEM_WITH_SETJMP
10743# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10744 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10745# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10747#else
10748# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10749 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10750# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10751 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10752#endif
10753
10754
10755
10756#ifndef IEM_WITH_SETJMP
10757# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10758 do { \
10759 uint8_t u8Tmp; \
10760 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10761 (a_u16Dst) = u8Tmp; \
10762 } while (0)
10763# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10764 do { \
10765 uint8_t u8Tmp; \
10766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10767 (a_u32Dst) = u8Tmp; \
10768 } while (0)
10769# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10770 do { \
10771 uint8_t u8Tmp; \
10772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10773 (a_u64Dst) = u8Tmp; \
10774 } while (0)
10775# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10776 do { \
10777 uint16_t u16Tmp; \
10778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10779 (a_u32Dst) = u16Tmp; \
10780 } while (0)
10781# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10782 do { \
10783 uint16_t u16Tmp; \
10784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10785 (a_u64Dst) = u16Tmp; \
10786 } while (0)
10787# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10788 do { \
10789 uint32_t u32Tmp; \
10790 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10791 (a_u64Dst) = u32Tmp; \
10792 } while (0)
10793#else /* IEM_WITH_SETJMP */
10794# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10795 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10796# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10797 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10798# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10799 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10800# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10801 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10802# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10803 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10804# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10805 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10806#endif /* IEM_WITH_SETJMP */
10807
10808#ifndef IEM_WITH_SETJMP
10809# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10810 do { \
10811 uint8_t u8Tmp; \
10812 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10813 (a_u16Dst) = (int8_t)u8Tmp; \
10814 } while (0)
10815# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10816 do { \
10817 uint8_t u8Tmp; \
10818 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10819 (a_u32Dst) = (int8_t)u8Tmp; \
10820 } while (0)
10821# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10822 do { \
10823 uint8_t u8Tmp; \
10824 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10825 (a_u64Dst) = (int8_t)u8Tmp; \
10826 } while (0)
10827# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10828 do { \
10829 uint16_t u16Tmp; \
10830 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10831 (a_u32Dst) = (int16_t)u16Tmp; \
10832 } while (0)
10833# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10834 do { \
10835 uint16_t u16Tmp; \
10836 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10837 (a_u64Dst) = (int16_t)u16Tmp; \
10838 } while (0)
10839# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10840 do { \
10841 uint32_t u32Tmp; \
10842 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10843 (a_u64Dst) = (int32_t)u32Tmp; \
10844 } while (0)
10845#else /* IEM_WITH_SETJMP */
10846# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10847 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10848# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10849 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10850# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10851 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10852# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10853 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10854# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10855 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10856# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10857 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10858#endif /* IEM_WITH_SETJMP */
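/* Example: the _SX_ fetchers above sign-extend via the intermediate cast, so
 * fetching the byte 0x80 with IEM_MC_FETCH_MEM_U8_SX_U32 yields 0xffffff80,
 * whereas the corresponding _ZX_ fetcher zero-extends it to 0x00000080. */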
10859
10860#ifndef IEM_WITH_SETJMP
10861# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10862 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10863# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10864 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10865# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10866 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10867# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10868 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10869#else
10870# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10871 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10872# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10873 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10874# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10875 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10876# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10877 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10878#endif
10879
10880#ifndef IEM_WITH_SETJMP
10881# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10882 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10883# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10884 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10885# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10886 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10887# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10888 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10889#else
10890# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10891 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10892# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10893 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10894# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10895 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10896# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10897 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10898#endif
10899
10900#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10901#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10902#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10903#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10904#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10905#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10906#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10907 do { \
10908 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10909 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10910 } while (0)
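/* For reference, the constants above encode negative quiet NaNs: 0xffc00000 is
 * binary32 with the sign bit, an all-ones exponent and the quiet bit set;
 * 0xfff8000000000000 is the binary64 equivalent; and the R80 pair sets the
 * sign plus an all-ones exponent (0xffff) with the integer and quiet bits
 * (the top two bits of 0xc000000000000000). */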
10911
10912#ifndef IEM_WITH_SETJMP
10913# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10914 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10915# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10916 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10917#else
10918# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10919 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10920# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10921 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10922#endif
10923
10924
10925#define IEM_MC_PUSH_U16(a_u16Value) \
10926 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10927#define IEM_MC_PUSH_U32(a_u32Value) \
10928 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10929#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10930 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10931#define IEM_MC_PUSH_U64(a_u64Value) \
10932 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10933
10934#define IEM_MC_POP_U16(a_pu16Value) \
10935 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10936#define IEM_MC_POP_U32(a_pu32Value) \
10937 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10938#define IEM_MC_POP_U64(a_pu64Value) \
10939 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10940
10941/** Maps guest memory for direct or bounce buffered access.
10942 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10943 * @remarks May return.
10944 */
10945#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10946 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10947
10948/** Maps guest memory for direct or bounce buffered access.
10949 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10950 * @remarks May return.
10951 */
10952#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10953 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10954
10955/** Commits the memory and unmaps the guest memory.
10956 * @remarks May return.
10957 */
10958#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10959 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
10960
10961/** Commits the memory and unmaps the guest memory unless the FPU status
10962 * word (@a a_u16FSW) and the FPU control word indicate a pending unmasked
10963 * exception that would cause the store (FST and friends) not to happen.
10964 *
10965 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10966 * store, while \#P will not.
10967 *
10968 * @remarks May in theory return - for now.
10969 */
10970#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10971 do { \
10972 if ( !(a_u16FSW & X86_FSW_ES) \
10973 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
10974 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
10975 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
10976 } while (0)
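/* Rough usage sketch (helper and variable names are illustrative only): a
 * store like FSTP m32real maps the destination, lets the assembly worker set
 * the FSW, and then commits only when no unmasked exception is pending:
 *
 *      IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1);
 *      IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
 */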
10977
10978/** Calculate effective address from R/M. */
10979#ifndef IEM_WITH_SETJMP
10980# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10981 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
10982#else
10983# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10984 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
10985#endif
10986
10987#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
10988#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
10989#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
10990#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
10991#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
10992#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
10993#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
10994
10995/**
10996 * Defers the rest of the instruction emulation to a C implementation routine
10997 * and returns, only taking the standard parameters.
10998 *
10999 * @param a_pfnCImpl The pointer to the C routine.
11000 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11001 */
11002#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11003
11004/**
11005 * Defers the rest of instruction emulation to a C implementation routine and
11006 * returns, taking one argument in addition to the standard ones.
11007 *
11008 * @param a_pfnCImpl The pointer to the C routine.
11009 * @param a0 The argument.
11010 */
11011#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11012
11013/**
11014 * Defers the rest of the instruction emulation to a C implementation routine
11015 * and returns, taking two arguments in addition to the standard ones.
11016 *
11017 * @param a_pfnCImpl The pointer to the C routine.
11018 * @param a0 The first extra argument.
11019 * @param a1 The second extra argument.
11020 */
11021#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11022
11023/**
11024 * Defers the rest of the instruction emulation to a C implementation routine
11025 * and returns, taking three arguments in addition to the standard ones.
11026 *
11027 * @param a_pfnCImpl The pointer to the C routine.
11028 * @param a0 The first extra argument.
11029 * @param a1 The second extra argument.
11030 * @param a2 The third extra argument.
11031 */
11032#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11033
11034/**
11035 * Defers the rest of the instruction emulation to a C implementation routine
11036 * and returns, taking four arguments in addition to the standard ones.
11037 *
11038 * @param a_pfnCImpl The pointer to the C routine.
11039 * @param a0 The first extra argument.
11040 * @param a1 The second extra argument.
11041 * @param a2 The third extra argument.
11042 * @param a3 The fourth extra argument.
11043 */
11044#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11045
11046/**
11047 * Defers the rest of the instruction emulation to a C implementation routine
11048 * and returns, taking five arguments in addition to the standard ones.
11049 *
11050 * @param a_pfnCImpl The pointer to the C routine.
11051 * @param a0 The first extra argument.
11052 * @param a1 The second extra argument.
11053 * @param a2 The third extra argument.
11054 * @param a3 The fourth extra argument.
11055 * @param a4 The fifth extra argument.
11056 */
11057#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11058
11059/**
11060 * Defers the entire instruction emulation to a C implementation routine and
11061 * returns, only taking the standard parameters.
11062 *
11063 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11064 *
11065 * @param a_pfnCImpl The pointer to the C routine.
11066 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11067 */
11068#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11069
11070/**
11071 * Defers the entire instruction emulation to a C implementation routine and
11072 * returns, taking one argument in addition to the standard ones.
11073 *
11074 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11075 *
11076 * @param a_pfnCImpl The pointer to the C routine.
11077 * @param a0 The argument.
11078 */
11079#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11080
11081/**
11082 * Defers the entire instruction emulation to a C implementation routine and
11083 * returns, taking two arguments in addition to the standard ones.
11084 *
11085 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11086 *
11087 * @param a_pfnCImpl The pointer to the C routine.
11088 * @param a0 The first extra argument.
11089 * @param a1 The second extra argument.
11090 */
11091#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11092
11093/**
11094 * Defers the entire instruction emulation to a C implementation routine and
11095 * returns, taking three arguments in addition to the standard ones.
11096 *
11097 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11098 *
11099 * @param a_pfnCImpl The pointer to the C routine.
11100 * @param a0 The first extra argument.
11101 * @param a1 The second extra argument.
11102 * @param a2 The third extra argument.
11103 */
11104#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
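/* Typical use (sketch): an opcode handler that needs no IEM_MC state at all
 * simply tail-calls through one of the DEFER_TO_CIMPL macros, along the lines
 * of:
 *
 *      FNIEMOP_DEF(iemOp_hlt)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *      }
 */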
11105
11106/**
11107 * Calls a FPU assembly implementation taking one visible argument.
11108 *
11109 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11110 * @param a0 The first extra argument.
11111 */
11112#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11113 do { \
11114 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11115 } while (0)
11116
11117/**
11118 * Calls a FPU assembly implementation taking two visible arguments.
11119 *
11120 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11121 * @param a0 The first extra argument.
11122 * @param a1 The second extra argument.
11123 */
11124#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11125 do { \
11126 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11127 } while (0)
11128
11129/**
11130 * Calls a FPU assembly implementation taking three visible arguments.
11131 *
11132 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11133 * @param a0 The first extra argument.
11134 * @param a1 The second extra argument.
11135 * @param a2 The third extra argument.
11136 */
11137#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11138 do { \
11139 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11140 } while (0)
11141
11142#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11143 do { \
11144 (a_FpuData).FSW = (a_FSW); \
11145 (a_FpuData).r80Result = *(a_pr80Value); \
11146 } while (0)
11147
11148/** Pushes FPU result onto the stack. */
11149#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11150 iemFpuPushResult(pVCpu, &a_FpuData)
11151/** Pushes FPU result onto the stack and sets the FPUDP. */
11152#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11153 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11154
11155/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11156#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11157 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11158
11159/** Stores FPU result in a stack register. */
11160#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11161 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11162/** Stores FPU result in a stack register and pops the stack. */
11163#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11164 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11165/** Stores FPU result in a stack register and sets the FPUDP. */
11166#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11167 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11168/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11169 * stack. */
11170#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11171 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
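/* Sketch of the common ST0/STn arithmetic pattern (names illustrative): the
 * decoder prepares the FPU, checks that both registers hold something and
 * either stores the result or signals stack underflow:
 *
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 */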
11172
11173/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11174#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11175 iemFpuUpdateOpcodeAndIp(pVCpu)
11176/** Free a stack register (for FFREE and FFREEP). */
11177#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11178 iemFpuStackFree(pVCpu, a_iStReg)
11179/** Increment the FPU stack pointer. */
11180#define IEM_MC_FPU_STACK_INC_TOP() \
11181 iemFpuStackIncTop(pVCpu)
11182/** Decrement the FPU stack pointer. */
11183#define IEM_MC_FPU_STACK_DEC_TOP() \
11184 iemFpuStackDecTop(pVCpu)
11185
11186/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11187#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11188 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11189/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11190#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11191 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11192/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11193#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11194 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11195/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11196#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11197 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11198/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11199 * stack. */
11200#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11201 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11202/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11203#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11204 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11205
11206/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11207#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11208 iemFpuStackUnderflow(pVCpu, a_iStDst)
11209/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11210 * stack. */
11211#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11212 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11213/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11214 * FPUDS. */
11215#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11216 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11217/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11218 * FPUDS. Pops stack. */
11219#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11220 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11221/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11222 * stack twice. */
11223#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11224 iemFpuStackUnderflowThenPopPop(pVCpu)
11225/** Raises a FPU stack underflow exception for an instruction pushing a result
11226 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11227#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11228 iemFpuStackPushUnderflow(pVCpu)
11229/** Raises a FPU stack underflow exception for an instruction pushing a result
11230 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11231#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11232 iemFpuStackPushUnderflowTwo(pVCpu)
11233
11234/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11235 * FPUIP, FPUCS and FOP. */
11236#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11237 iemFpuStackPushOverflow(pVCpu)
11238/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11239 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11240#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11241 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11242/** Prepares for using the FPU state.
11243 * Ensures that we can use the host FPU in the current context (RC+R0).
11244 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11245#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11246/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11247#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11248/** Actualizes the guest FPU state so it can be accessed and modified. */
11249#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11250
11251/** Prepares for using the SSE state.
11252 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11253 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11254#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11255/** Actualizes the guest XMM0..15 register state for read-only access. */
11256#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11257/** Actualizes the guest XMM0..15 register state for read-write access. */
11258#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11259
11260/**
11261 * Calls a MMX assembly implementation taking two visible arguments.
11262 *
11263 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11264 * @param a0 The first extra argument.
11265 * @param a1 The second extra argument.
11266 */
11267#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11268 do { \
11269 IEM_MC_PREPARE_FPU_USAGE(); \
11270 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11271 } while (0)
11272
11273/**
11274 * Calls a MMX assembly implementation taking three visible arguments.
11275 *
11276 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11277 * @param a0 The first extra argument.
11278 * @param a1 The second extra argument.
11279 * @param a2 The third extra argument.
11280 */
11281#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11282 do { \
11283 IEM_MC_PREPARE_FPU_USAGE(); \
11284 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11285 } while (0)
11286
11287
11288/**
11289 * Calls a SSE assembly implementation taking two visible arguments.
11290 *
11291 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11292 * @param a0 The first extra argument.
11293 * @param a1 The second extra argument.
11294 */
11295#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11296 do { \
11297 IEM_MC_PREPARE_SSE_USAGE(); \
11298 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11299 } while (0)
11300
11301/**
11302 * Calls a SSE assembly implementation taking three visible arguments.
11303 *
11304 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11305 * @param a0 The first extra argument.
11306 * @param a1 The second extra argument.
11307 * @param a2 The third extra argument.
11308 */
11309#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11310 do { \
11311 IEM_MC_PREPARE_SSE_USAGE(); \
11312 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11313 } while (0)
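/* Sketch of a register-form SSE instruction body using the above (register
 * and worker names are illustrative):
 *
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnU128, pDst, pSrc);
 */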
11314
11315/** @note Not for IOPL or IF testing. */
11316#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11317/** @note Not for IOPL or IF testing. */
11318#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11319/** @note Not for IOPL or IF testing. */
11320#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11321/** @note Not for IOPL or IF testing. */
11322#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11323/** @note Not for IOPL or IF testing. */
11324#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11325 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11326 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11327/** @note Not for IOPL or IF testing. */
11328#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11329 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11330 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11331/** @note Not for IOPL or IF testing. */
11332#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11333 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11334 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11335 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11336/** @note Not for IOPL or IF testing. */
11337#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11338 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11339 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11340 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11341#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11342#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11343#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11344/** @note Not for IOPL or IF testing. */
11345#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11346 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11347 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11348/** @note Not for IOPL or IF testing. */
11349#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11350 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11351 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11352/** @note Not for IOPL or IF testing. */
11353#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11354 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11355 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11356/** @note Not for IOPL or IF testing. */
11357#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11358 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11359 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11360/** @note Not for IOPL or IF testing. */
11361#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11362 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11363 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11364/** @note Not for IOPL or IF testing. */
11365#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11366 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11367 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11368#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11369#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11370
11371#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11372 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11373#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11374 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11375#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11376 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11377#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11378 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11379#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11380 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11381#define IEM_MC_IF_FCW_IM() \
11382 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11383
11384#define IEM_MC_ELSE() } else {
11385#define IEM_MC_ENDIF() } do {} while (0)
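/* The IF/ELSE/ENDIF macros above expand to plain C blocks; a conditional
 * branch decoder uses them roughly like this (sketch):
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 */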
11386
11387/** @} */
11388
11389
11390/** @name Opcode Debug Helpers.
11391 * @{
11392 */
11393#ifdef VBOX_WITH_STATISTICS
11394# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11395#else
11396# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11397#endif
11398
11399#ifdef DEBUG
11400# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11401 do { \
11402 IEMOP_INC_STATS(a_Stats); \
11403 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11404 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11405 } while (0)
11406#else
11407# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11408#endif
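/* Example (the statistics member name is illustrative): an opcode decoder
 * typically starts with something like
 *      IEMOP_MNEMONIC(and_Eb_Gb, "and Eb,Gb");
 * which bumps the per-instruction counter and, in debug builds, logs the
 * mnemonic together with CS:RIP at log level 4. */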
11409
11410/** @} */
11411
11412
11413/** @name Opcode Helpers.
11414 * @{
11415 */
11416
11417#ifdef IN_RING3
11418# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11419 do { \
11420 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11421 else \
11422 { \
11423 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11424 return IEMOP_RAISE_INVALID_OPCODE(); \
11425 } \
11426 } while (0)
11427#else
11428# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11429 do { \
11430 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11431 else return IEMOP_RAISE_INVALID_OPCODE(); \
11432 } while (0)
11433#endif
11434
11435/** The instruction requires a 186 or later. */
11436#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11437# define IEMOP_HLP_MIN_186() do { } while (0)
11438#else
11439# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11440#endif
11441
11442/** The instruction requires a 286 or later. */
11443#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11444# define IEMOP_HLP_MIN_286() do { } while (0)
11445#else
11446# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11447#endif
11448
11449/** The instruction requires a 386 or later. */
11450#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11451# define IEMOP_HLP_MIN_386() do { } while (0)
11452#else
11453# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11454#endif
11455
11456/** The instruction requires a 386 or later if the given expression is true. */
11457#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11458# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11459#else
11460# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11461#endif
11462
11463/** The instruction requires a 486 or later. */
11464#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11465# define IEMOP_HLP_MIN_486() do { } while (0)
11466#else
11467# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11468#endif
11469
11470/** The instruction requires a Pentium (586) or later. */
11471#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11472# define IEMOP_HLP_MIN_586() do { } while (0)
11473#else
11474# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11475#endif
11476
11477/** The instruction requires a PentiumPro (686) or later. */
11478#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11479# define IEMOP_HLP_MIN_686() do { } while (0)
11480#else
11481# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11482#endif
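/* Usage note: an instruction introduced with the 80386, for example, simply
 * places IEMOP_HLP_MIN_386() at the top of its decoder; on older target CPUs
 * this raises \#UD instead of decoding any further. */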
11483
11484
11485/** The instruction raises an \#UD in real and V8086 mode. */
11486#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11487 do \
11488 { \
11489 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11490 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11491 } while (0)
11492
11493/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11494 * 64-bit mode. */
11495#define IEMOP_HLP_NO_64BIT() \
11496 do \
11497 { \
11498 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11499 return IEMOP_RAISE_INVALID_OPCODE(); \
11500 } while (0)
11501
11502/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11503 * 64-bit mode. */
11504#define IEMOP_HLP_ONLY_64BIT() \
11505 do \
11506 { \
11507 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11508 return IEMOP_RAISE_INVALID_OPCODE(); \
11509 } while (0)
11510
11511/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11512#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11513 do \
11514 { \
11515 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11516 iemRecalEffOpSize64Default(pVCpu); \
11517 } while (0)
11518
11519/** The instruction has 64-bit operand size if 64-bit mode. */
11520#define IEMOP_HLP_64BIT_OP_SIZE() \
11521 do \
11522 { \
11523 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11524 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11525 } while (0)
11526
11527/** Only a REX prefix immediately preceding the first opcode byte takes
11528 * effect. This macro helps ensure this as well as logging bad guest code. */
11529#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11530 do \
11531 { \
11532 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11533 { \
11534 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11535 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11536 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11537 pVCpu->iem.s.uRexB = 0; \
11538 pVCpu->iem.s.uRexIndex = 0; \
11539 pVCpu->iem.s.uRexReg = 0; \
11540 iemRecalEffOpSize(pVCpu); \
11541 } \
11542 } while (0)
11543
11544/**
11545 * Done decoding.
11546 */
11547#define IEMOP_HLP_DONE_DECODING() \
11548 do \
11549 { \
11550 /*nothing for now, maybe later... */ \
11551 } while (0)
11552
11553/**
11554 * Done decoding, raise \#UD exception if lock prefix present.
11555 */
11556#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11557 do \
11558 { \
11559 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11560 { /* likely */ } \
11561 else \
11562 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11563 } while (0)
11564#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11565 do \
11566 { \
11567 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11568 { /* likely */ } \
11569 else \
11570 { \
11571 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11572 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11573 } \
11574 } while (0)
11575#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11576 do \
11577 { \
11578 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11579 { /* likely */ } \
11580 else \
11581 { \
11582 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11583 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11584 } \
11585 } while (0)
11586
11587/**
11588 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11589 * are present.
11590 */
11591#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11592 do \
11593 { \
11594 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11595 { /* likely */ } \
11596 else \
11597 return IEMOP_RAISE_INVALID_OPCODE(); \
11598 } while (0)
11599
11600
11601/**
11602 * Calculates the effective address of a ModR/M memory operand.
11603 *
11604 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11605 *
11606 * @return Strict VBox status code.
11607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11608 * @param bRm The ModRM byte.
11609 * @param cbImm The size of any immediate following the
11610 * effective address opcode bytes. Important for
11611 * RIP relative addressing.
11612 * @param pGCPtrEff Where to return the effective address.
11613 */
11614IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11615{
11616 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11617 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11618# define SET_SS_DEF() \
11619 do \
11620 { \
11621 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11622 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11623 } while (0)
11624
11625 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11626 {
11627/** @todo Check the effective address size crap! */
11628 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11629 {
11630 uint16_t u16EffAddr;
11631
11632 /* Handle the disp16 form with no registers first. */
11633 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11634 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11635 else
11636 {
11637 /* Get the displacement. */
11638 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11639 {
11640 case 0: u16EffAddr = 0; break;
11641 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11642 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11643 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11644 }
11645
11646 /* Add the base and index registers to the disp. */
11647 switch (bRm & X86_MODRM_RM_MASK)
11648 {
11649 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11650 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11651 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11652 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11653 case 4: u16EffAddr += pCtx->si; break;
11654 case 5: u16EffAddr += pCtx->di; break;
11655 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11656 case 7: u16EffAddr += pCtx->bx; break;
11657 }
11658 }
11659
11660 *pGCPtrEff = u16EffAddr;
11661 }
11662 else
11663 {
11664 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11665 uint32_t u32EffAddr;
11666
11667 /* Handle the disp32 form with no registers first. */
11668 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11669 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11670 else
11671 {
11672 /* Get the register (or SIB) value. */
11673 switch ((bRm & X86_MODRM_RM_MASK))
11674 {
11675 case 0: u32EffAddr = pCtx->eax; break;
11676 case 1: u32EffAddr = pCtx->ecx; break;
11677 case 2: u32EffAddr = pCtx->edx; break;
11678 case 3: u32EffAddr = pCtx->ebx; break;
11679 case 4: /* SIB */
11680 {
11681 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11682
11683 /* Get the index and scale it. */
11684 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11685 {
11686 case 0: u32EffAddr = pCtx->eax; break;
11687 case 1: u32EffAddr = pCtx->ecx; break;
11688 case 2: u32EffAddr = pCtx->edx; break;
11689 case 3: u32EffAddr = pCtx->ebx; break;
11690 case 4: u32EffAddr = 0; /*none */ break;
11691 case 5: u32EffAddr = pCtx->ebp; break;
11692 case 6: u32EffAddr = pCtx->esi; break;
11693 case 7: u32EffAddr = pCtx->edi; break;
11694 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11695 }
11696 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11697
11698 /* add base */
11699 switch (bSib & X86_SIB_BASE_MASK)
11700 {
11701 case 0: u32EffAddr += pCtx->eax; break;
11702 case 1: u32EffAddr += pCtx->ecx; break;
11703 case 2: u32EffAddr += pCtx->edx; break;
11704 case 3: u32EffAddr += pCtx->ebx; break;
11705 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11706 case 5:
11707 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11708 {
11709 u32EffAddr += pCtx->ebp;
11710 SET_SS_DEF();
11711 }
11712 else
11713 {
11714 uint32_t u32Disp;
11715 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11716 u32EffAddr += u32Disp;
11717 }
11718 break;
11719 case 6: u32EffAddr += pCtx->esi; break;
11720 case 7: u32EffAddr += pCtx->edi; break;
11721 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11722 }
11723 break;
11724 }
11725 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11726 case 6: u32EffAddr = pCtx->esi; break;
11727 case 7: u32EffAddr = pCtx->edi; break;
11728 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11729 }
11730
11731 /* Get and add the displacement. */
11732 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11733 {
11734 case 0:
11735 break;
11736 case 1:
11737 {
11738 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11739 u32EffAddr += i8Disp;
11740 break;
11741 }
11742 case 2:
11743 {
11744 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11745 u32EffAddr += u32Disp;
11746 break;
11747 }
11748 default:
11749 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11750 }
11751
11752 }
11753 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11754 *pGCPtrEff = u32EffAddr;
11755 else
11756 {
11757 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11758 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11759 }
11760 }
11761 }
11762 else
11763 {
11764 uint64_t u64EffAddr;
11765
11766 /* Handle the rip+disp32 form with no registers first. */
11767 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11768 {
11769 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11770 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11771 }
11772 else
11773 {
11774 /* Get the register (or SIB) value. */
11775 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11776 {
11777 case 0: u64EffAddr = pCtx->rax; break;
11778 case 1: u64EffAddr = pCtx->rcx; break;
11779 case 2: u64EffAddr = pCtx->rdx; break;
11780 case 3: u64EffAddr = pCtx->rbx; break;
11781 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11782 case 6: u64EffAddr = pCtx->rsi; break;
11783 case 7: u64EffAddr = pCtx->rdi; break;
11784 case 8: u64EffAddr = pCtx->r8; break;
11785 case 9: u64EffAddr = pCtx->r9; break;
11786 case 10: u64EffAddr = pCtx->r10; break;
11787 case 11: u64EffAddr = pCtx->r11; break;
11788 case 13: u64EffAddr = pCtx->r13; break;
11789 case 14: u64EffAddr = pCtx->r14; break;
11790 case 15: u64EffAddr = pCtx->r15; break;
11791 /* SIB */
11792 case 4:
11793 case 12:
11794 {
11795 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11796
11797 /* Get the index and scale it. */
11798 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11799 {
11800 case 0: u64EffAddr = pCtx->rax; break;
11801 case 1: u64EffAddr = pCtx->rcx; break;
11802 case 2: u64EffAddr = pCtx->rdx; break;
11803 case 3: u64EffAddr = pCtx->rbx; break;
11804 case 4: u64EffAddr = 0; /*none */ break;
11805 case 5: u64EffAddr = pCtx->rbp; break;
11806 case 6: u64EffAddr = pCtx->rsi; break;
11807 case 7: u64EffAddr = pCtx->rdi; break;
11808 case 8: u64EffAddr = pCtx->r8; break;
11809 case 9: u64EffAddr = pCtx->r9; break;
11810 case 10: u64EffAddr = pCtx->r10; break;
11811 case 11: u64EffAddr = pCtx->r11; break;
11812 case 12: u64EffAddr = pCtx->r12; break;
11813 case 13: u64EffAddr = pCtx->r13; break;
11814 case 14: u64EffAddr = pCtx->r14; break;
11815 case 15: u64EffAddr = pCtx->r15; break;
11816 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11817 }
11818 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11819
11820 /* add base */
11821 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11822 {
11823 case 0: u64EffAddr += pCtx->rax; break;
11824 case 1: u64EffAddr += pCtx->rcx; break;
11825 case 2: u64EffAddr += pCtx->rdx; break;
11826 case 3: u64EffAddr += pCtx->rbx; break;
11827 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11828 case 6: u64EffAddr += pCtx->rsi; break;
11829 case 7: u64EffAddr += pCtx->rdi; break;
11830 case 8: u64EffAddr += pCtx->r8; break;
11831 case 9: u64EffAddr += pCtx->r9; break;
11832 case 10: u64EffAddr += pCtx->r10; break;
11833 case 11: u64EffAddr += pCtx->r11; break;
11834 case 12: u64EffAddr += pCtx->r12; break;
11835 case 14: u64EffAddr += pCtx->r14; break;
11836 case 15: u64EffAddr += pCtx->r15; break;
11837 /* complicated encodings */
11838 case 5:
11839 case 13:
11840 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11841 {
11842 if (!pVCpu->iem.s.uRexB)
11843 {
11844 u64EffAddr += pCtx->rbp;
11845 SET_SS_DEF();
11846 }
11847 else
11848 u64EffAddr += pCtx->r13;
11849 }
11850 else
11851 {
11852 uint32_t u32Disp;
11853 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11854 u64EffAddr += (int32_t)u32Disp;
11855 }
11856 break;
11857 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11858 }
11859 break;
11860 }
11861 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11862 }
11863
11864 /* Get and add the displacement. */
11865 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11866 {
11867 case 0:
11868 break;
11869 case 1:
11870 {
11871 int8_t i8Disp;
11872 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11873 u64EffAddr += i8Disp;
11874 break;
11875 }
11876 case 2:
11877 {
11878 uint32_t u32Disp;
11879 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11880 u64EffAddr += (int32_t)u32Disp;
11881 break;
11882 }
11883 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11884 }
11885
11886 }
11887
11888 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11889 *pGCPtrEff = u64EffAddr;
11890 else
11891 {
11892 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11893 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11894 }
11895 }
11896
11897 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11898 return VINF_SUCCESS;
11899}
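/* Worked example: in 16-bit addressing bRm=0x42 decodes as mod=1, rm=2, i.e.
 * [bp+si+disp8] with SS as the default segment; in 64-bit mode bRm=0x05
 * (mod=0, rm=5) instead selects RIP-relative addressing, which is why the
 * immediate size (cbImm) is needed to find the end of the instruction above. */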
11900
11901
11902/**
11903 * Calculates the effective address of a ModR/M memory operand.
11904 *
11905 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11906 *
11907 * @return Strict VBox status code.
11908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11909 * @param bRm The ModRM byte.
11910 * @param cbImm The size of any immediate following the
11911 * effective address opcode bytes. Important for
11912 * RIP relative addressing.
11913 * @param pGCPtrEff Where to return the effective address.
11914 * @param offRsp RSP displacement.
11915 */
11916IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11917{
11918 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11919 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11920# define SET_SS_DEF() \
11921 do \
11922 { \
11923 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11924 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11925 } while (0)
11926
11927 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11928 {
11929/** @todo Check the effective address size crap! */
11930 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11931 {
11932 uint16_t u16EffAddr;
11933
11934 /* Handle the disp16 form with no registers first. */
11935 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11936 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11937 else
11938 {
11939 /* Get the displacement. */
11940 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11941 {
11942 case 0: u16EffAddr = 0; break;
11943 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11944 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11945 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11946 }
11947
11948 /* Add the base and index registers to the disp. */
11949 switch (bRm & X86_MODRM_RM_MASK)
11950 {
11951 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11952 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11953 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11954 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11955 case 4: u16EffAddr += pCtx->si; break;
11956 case 5: u16EffAddr += pCtx->di; break;
11957 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11958 case 7: u16EffAddr += pCtx->bx; break;
11959 }
11960 }
11961
11962 *pGCPtrEff = u16EffAddr;
11963 }
11964 else
11965 {
11966 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11967 uint32_t u32EffAddr;
11968
11969 /* Handle the disp32 form with no registers first. */
11970 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11971 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11972 else
11973 {
11974 /* Get the register (or SIB) value. */
11975 switch ((bRm & X86_MODRM_RM_MASK))
11976 {
11977 case 0: u32EffAddr = pCtx->eax; break;
11978 case 1: u32EffAddr = pCtx->ecx; break;
11979 case 2: u32EffAddr = pCtx->edx; break;
11980 case 3: u32EffAddr = pCtx->ebx; break;
11981 case 4: /* SIB */
11982 {
11983 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11984
11985 /* Get the index and scale it. */
11986 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11987 {
11988 case 0: u32EffAddr = pCtx->eax; break;
11989 case 1: u32EffAddr = pCtx->ecx; break;
11990 case 2: u32EffAddr = pCtx->edx; break;
11991 case 3: u32EffAddr = pCtx->ebx; break;
11992 case 4: u32EffAddr = 0; /*none */ break;
11993 case 5: u32EffAddr = pCtx->ebp; break;
11994 case 6: u32EffAddr = pCtx->esi; break;
11995 case 7: u32EffAddr = pCtx->edi; break;
11996 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11997 }
11998 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11999
12000 /* add base */
12001 switch (bSib & X86_SIB_BASE_MASK)
12002 {
12003 case 0: u32EffAddr += pCtx->eax; break;
12004 case 1: u32EffAddr += pCtx->ecx; break;
12005 case 2: u32EffAddr += pCtx->edx; break;
12006 case 3: u32EffAddr += pCtx->ebx; break;
12007 case 4:
12008 u32EffAddr += pCtx->esp + offRsp;
12009 SET_SS_DEF();
12010 break;
12011 case 5:
12012 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12013 {
12014 u32EffAddr += pCtx->ebp;
12015 SET_SS_DEF();
12016 }
12017 else
12018 {
12019 uint32_t u32Disp;
12020 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12021 u32EffAddr += u32Disp;
12022 }
12023 break;
12024 case 6: u32EffAddr += pCtx->esi; break;
12025 case 7: u32EffAddr += pCtx->edi; break;
12026 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12027 }
12028 break;
12029 }
12030 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12031 case 6: u32EffAddr = pCtx->esi; break;
12032 case 7: u32EffAddr = pCtx->edi; break;
12033 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12034 }
12035
12036 /* Get and add the displacement. */
12037 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12038 {
12039 case 0:
12040 break;
12041 case 1:
12042 {
12043 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12044 u32EffAddr += i8Disp;
12045 break;
12046 }
12047 case 2:
12048 {
12049 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12050 u32EffAddr += u32Disp;
12051 break;
12052 }
12053 default:
12054 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12055 }
12056
12057 }
12058 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12059 *pGCPtrEff = u32EffAddr;
12060 else
12061 {
12062 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12063 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12064 }
12065 }
12066 }
12067 else
12068 {
12069 uint64_t u64EffAddr;
12070
12071 /* Handle the rip+disp32 form with no registers first. */
12072 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12073 {
12074 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12075 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12076 }
12077 else
12078 {
12079 /* Get the register (or SIB) value. */
12080 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12081 {
12082 case 0: u64EffAddr = pCtx->rax; break;
12083 case 1: u64EffAddr = pCtx->rcx; break;
12084 case 2: u64EffAddr = pCtx->rdx; break;
12085 case 3: u64EffAddr = pCtx->rbx; break;
12086 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12087 case 6: u64EffAddr = pCtx->rsi; break;
12088 case 7: u64EffAddr = pCtx->rdi; break;
12089 case 8: u64EffAddr = pCtx->r8; break;
12090 case 9: u64EffAddr = pCtx->r9; break;
12091 case 10: u64EffAddr = pCtx->r10; break;
12092 case 11: u64EffAddr = pCtx->r11; break;
12093 case 13: u64EffAddr = pCtx->r13; break;
12094 case 14: u64EffAddr = pCtx->r14; break;
12095 case 15: u64EffAddr = pCtx->r15; break;
12096 /* SIB */
12097 case 4:
12098 case 12:
12099 {
12100 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12101
12102 /* Get the index and scale it. */
12103 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12104 {
12105 case 0: u64EffAddr = pCtx->rax; break;
12106 case 1: u64EffAddr = pCtx->rcx; break;
12107 case 2: u64EffAddr = pCtx->rdx; break;
12108 case 3: u64EffAddr = pCtx->rbx; break;
12109 case 4: u64EffAddr = 0; /*none */ break;
12110 case 5: u64EffAddr = pCtx->rbp; break;
12111 case 6: u64EffAddr = pCtx->rsi; break;
12112 case 7: u64EffAddr = pCtx->rdi; break;
12113 case 8: u64EffAddr = pCtx->r8; break;
12114 case 9: u64EffAddr = pCtx->r9; break;
12115 case 10: u64EffAddr = pCtx->r10; break;
12116 case 11: u64EffAddr = pCtx->r11; break;
12117 case 12: u64EffAddr = pCtx->r12; break;
12118 case 13: u64EffAddr = pCtx->r13; break;
12119 case 14: u64EffAddr = pCtx->r14; break;
12120 case 15: u64EffAddr = pCtx->r15; break;
12121 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12122 }
12123 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12124
12125 /* add base */
12126 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12127 {
12128 case 0: u64EffAddr += pCtx->rax; break;
12129 case 1: u64EffAddr += pCtx->rcx; break;
12130 case 2: u64EffAddr += pCtx->rdx; break;
12131 case 3: u64EffAddr += pCtx->rbx; break;
12132 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12133 case 6: u64EffAddr += pCtx->rsi; break;
12134 case 7: u64EffAddr += pCtx->rdi; break;
12135 case 8: u64EffAddr += pCtx->r8; break;
12136 case 9: u64EffAddr += pCtx->r9; break;
12137 case 10: u64EffAddr += pCtx->r10; break;
12138 case 11: u64EffAddr += pCtx->r11; break;
12139 case 12: u64EffAddr += pCtx->r12; break;
12140 case 14: u64EffAddr += pCtx->r14; break;
12141 case 15: u64EffAddr += pCtx->r15; break;
12142 /* complicated encodings */
12143 case 5:
12144 case 13:
12145 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12146 {
12147 if (!pVCpu->iem.s.uRexB)
12148 {
12149 u64EffAddr += pCtx->rbp;
12150 SET_SS_DEF();
12151 }
12152 else
12153 u64EffAddr += pCtx->r13;
12154 }
12155 else
12156 {
12157 uint32_t u32Disp;
12158 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12159 u64EffAddr += (int32_t)u32Disp;
12160 }
12161 break;
12162 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12163 }
12164 break;
12165 }
12166 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12167 }
12168
12169 /* Get and add the displacement. */
12170 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12171 {
12172 case 0:
12173 break;
12174 case 1:
12175 {
12176 int8_t i8Disp;
12177 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12178 u64EffAddr += i8Disp;
12179 break;
12180 }
12181 case 2:
12182 {
12183 uint32_t u32Disp;
12184 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12185 u64EffAddr += (int32_t)u32Disp;
12186 break;
12187 }
12188 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12189 }
12190
12191 }
12192
12193 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12194 *pGCPtrEff = u64EffAddr;
12195 else
12196 {
12197 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12198 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12199 }
12200 }
12201
12202 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12203 return VINF_SUCCESS;
12204}
12205
12206
12207#ifdef IEM_WITH_SETJMP
12208/**
12209 * Calculates the effective address of a ModR/M memory operand.
12210 *
12211 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12212 *
12213 * May longjmp on internal error.
12214 *
12215 * @return The effective address.
12216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12217 * @param bRm The ModRM byte.
12218 * @param cbImm The size of any immediate following the
12219 * effective address opcode bytes. Important for
12220 * RIP relative addressing.
12221 */
12222IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12223{
12224 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12225 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12226# define SET_SS_DEF() \
12227 do \
12228 { \
12229 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12230 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12231 } while (0)
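    /* Note: SET_SS_DEF implements the architectural rule that memory operands
       based on xBP or xSP default to the SS segment unless a segment override
       prefix is present. */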
12232
12233 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12234 {
12235/** @todo Check the effective address size crap! */
12236 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12237 {
12238 uint16_t u16EffAddr;
12239
12240 /* Handle the disp16 form with no registers first. */
12241 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12242 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12243 else
12244 {
12245 /* Get the displacement. */
12246 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12247 {
12248 case 0: u16EffAddr = 0; break;
12249 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12250 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12251 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12252 }
12253
12254 /* Add the base and index registers to the disp. */
12255 switch (bRm & X86_MODRM_RM_MASK)
12256 {
12257 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12258 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12259 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12260 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12261 case 4: u16EffAddr += pCtx->si; break;
12262 case 5: u16EffAddr += pCtx->di; break;
12263 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12264 case 7: u16EffAddr += pCtx->bx; break;
12265 }
12266 }
12267
12268 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12269 return u16EffAddr;
12270 }
12271
12272 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12273 uint32_t u32EffAddr;
12274
12275 /* Handle the disp32 form with no registers first. */
12276 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12277 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12278 else
12279 {
12280 /* Get the register (or SIB) value. */
12281 switch ((bRm & X86_MODRM_RM_MASK))
12282 {
12283 case 0: u32EffAddr = pCtx->eax; break;
12284 case 1: u32EffAddr = pCtx->ecx; break;
12285 case 2: u32EffAddr = pCtx->edx; break;
12286 case 3: u32EffAddr = pCtx->ebx; break;
12287 case 4: /* SIB */
12288 {
12289 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12290
12291 /* Get the index and scale it. */
12292 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12293 {
12294 case 0: u32EffAddr = pCtx->eax; break;
12295 case 1: u32EffAddr = pCtx->ecx; break;
12296 case 2: u32EffAddr = pCtx->edx; break;
12297 case 3: u32EffAddr = pCtx->ebx; break;
12298 case 4: u32EffAddr = 0; /*none */ break;
12299 case 5: u32EffAddr = pCtx->ebp; break;
12300 case 6: u32EffAddr = pCtx->esi; break;
12301 case 7: u32EffAddr = pCtx->edi; break;
12302 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12303 }
12304 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12305
12306 /* add base */
12307 switch (bSib & X86_SIB_BASE_MASK)
12308 {
12309 case 0: u32EffAddr += pCtx->eax; break;
12310 case 1: u32EffAddr += pCtx->ecx; break;
12311 case 2: u32EffAddr += pCtx->edx; break;
12312 case 3: u32EffAddr += pCtx->ebx; break;
12313 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12314 case 5:
12315 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12316 {
12317 u32EffAddr += pCtx->ebp;
12318 SET_SS_DEF();
12319 }
12320 else
12321 {
12322 uint32_t u32Disp;
12323 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12324 u32EffAddr += u32Disp;
12325 }
12326 break;
12327 case 6: u32EffAddr += pCtx->esi; break;
12328 case 7: u32EffAddr += pCtx->edi; break;
12329 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12330 }
12331 break;
12332 }
12333 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12334 case 6: u32EffAddr = pCtx->esi; break;
12335 case 7: u32EffAddr = pCtx->edi; break;
12336 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12337 }
12338
12339 /* Get and add the displacement. */
12340 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12341 {
12342 case 0:
12343 break;
12344 case 1:
12345 {
12346 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12347 u32EffAddr += i8Disp;
12348 break;
12349 }
12350 case 2:
12351 {
12352 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12353 u32EffAddr += u32Disp;
12354 break;
12355 }
12356 default:
12357 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12358 }
12359 }
12360
12361 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12362 {
12363 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12364 return u32EffAddr;
12365 }
12366 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12367 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12368 return u32EffAddr & UINT16_MAX;
12369 }
12370
12371 uint64_t u64EffAddr;
12372
12373 /* Handle the rip+disp32 form with no registers first. */
12374 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12375 {
12376 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12377 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12378 }
12379 else
12380 {
12381 /* Get the register (or SIB) value. */
12382 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12383 {
12384 case 0: u64EffAddr = pCtx->rax; break;
12385 case 1: u64EffAddr = pCtx->rcx; break;
12386 case 2: u64EffAddr = pCtx->rdx; break;
12387 case 3: u64EffAddr = pCtx->rbx; break;
12388 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12389 case 6: u64EffAddr = pCtx->rsi; break;
12390 case 7: u64EffAddr = pCtx->rdi; break;
12391 case 8: u64EffAddr = pCtx->r8; break;
12392 case 9: u64EffAddr = pCtx->r9; break;
12393 case 10: u64EffAddr = pCtx->r10; break;
12394 case 11: u64EffAddr = pCtx->r11; break;
12395 case 13: u64EffAddr = pCtx->r13; break;
12396 case 14: u64EffAddr = pCtx->r14; break;
12397 case 15: u64EffAddr = pCtx->r15; break;
12398 /* SIB */
12399 case 4:
12400 case 12:
12401 {
12402 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12403
12404 /* Get the index and scale it. */
12405 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12406 {
12407 case 0: u64EffAddr = pCtx->rax; break;
12408 case 1: u64EffAddr = pCtx->rcx; break;
12409 case 2: u64EffAddr = pCtx->rdx; break;
12410 case 3: u64EffAddr = pCtx->rbx; break;
12411 case 4: u64EffAddr = 0; /*none */ break;
12412 case 5: u64EffAddr = pCtx->rbp; break;
12413 case 6: u64EffAddr = pCtx->rsi; break;
12414 case 7: u64EffAddr = pCtx->rdi; break;
12415 case 8: u64EffAddr = pCtx->r8; break;
12416 case 9: u64EffAddr = pCtx->r9; break;
12417 case 10: u64EffAddr = pCtx->r10; break;
12418 case 11: u64EffAddr = pCtx->r11; break;
12419 case 12: u64EffAddr = pCtx->r12; break;
12420 case 13: u64EffAddr = pCtx->r13; break;
12421 case 14: u64EffAddr = pCtx->r14; break;
12422 case 15: u64EffAddr = pCtx->r15; break;
12423 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12424 }
12425 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12426
12427 /* add base */
12428 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12429 {
12430 case 0: u64EffAddr += pCtx->rax; break;
12431 case 1: u64EffAddr += pCtx->rcx; break;
12432 case 2: u64EffAddr += pCtx->rdx; break;
12433 case 3: u64EffAddr += pCtx->rbx; break;
12434 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12435 case 6: u64EffAddr += pCtx->rsi; break;
12436 case 7: u64EffAddr += pCtx->rdi; break;
12437 case 8: u64EffAddr += pCtx->r8; break;
12438 case 9: u64EffAddr += pCtx->r9; break;
12439 case 10: u64EffAddr += pCtx->r10; break;
12440 case 11: u64EffAddr += pCtx->r11; break;
12441 case 12: u64EffAddr += pCtx->r12; break;
12442 case 14: u64EffAddr += pCtx->r14; break;
12443 case 15: u64EffAddr += pCtx->r15; break;
12444 /* complicated encodings */
12445 case 5:
12446 case 13:
12447 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12448 {
12449 if (!pVCpu->iem.s.uRexB)
12450 {
12451 u64EffAddr += pCtx->rbp;
12452 SET_SS_DEF();
12453 }
12454 else
12455 u64EffAddr += pCtx->r13;
12456 }
12457 else
12458 {
12459 uint32_t u32Disp;
12460 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12461 u64EffAddr += (int32_t)u32Disp;
12462 }
12463 break;
12464 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12465 }
12466 break;
12467 }
12468 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12469 }
12470
12471 /* Get and add the displacement. */
12472 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12473 {
12474 case 0:
12475 break;
12476 case 1:
12477 {
12478 int8_t i8Disp;
12479 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12480 u64EffAddr += i8Disp;
12481 break;
12482 }
12483 case 2:
12484 {
12485 uint32_t u32Disp;
12486 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12487 u64EffAddr += (int32_t)u32Disp;
12488 break;
12489 }
12490 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12491 }
12492
12493 }
12494
12495 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12496 {
12497 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12498 return u64EffAddr;
12499 }
12500 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12501 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12502 return u64EffAddr & UINT32_MAX;
12503}
12504#endif /* IEM_WITH_SETJMP */
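
/*
 * Worked example (for reference): decoding the ModR/M + SIB operand of the
 * instruction bytes 8D 44 B3 10, i.e. lea eax, [ebx+esi*4+10h], with 32-bit
 * addressing:
 *
 *    bRm   = 0x44: mod=01 (disp8 follows), reg=000 (eax), rm=100 (SIB follows)
 *    bSib  = 0xb3: scale=10 (x4), index=110 (esi), base=011 (ebx)
 *    disp8 = 0x10
 *
 * The helpers above thus compute EffAddr = ebx + esi*4 + 0x10, with DS left as
 * the default segment since the base register is neither ebp nor esp.
 */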
12505
12506
12507/** @} */
12508
12509
12510
12511/*
12512 * Include the instructions
12513 */
12514#include "IEMAllInstructions.cpp.h"
12515
12516
12517
12518
12519#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12520
12521/**
12522 * Sets up execution verification mode.
12523 */
12524IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12525{
12526
12527 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12528
12529 /*
12530 * Always note down the address of the current instruction.
12531 */
12532 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12533 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12534
12535 /*
12536 * Enable verification and/or logging.
12537 */
12538 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12539 if ( fNewNoRem
12540 && ( 0
12541#if 0 /* auto enable on first paged protected mode interrupt */
12542 || ( pOrgCtx->eflags.Bits.u1IF
12543 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12544 && TRPMHasTrap(pVCpu)
12545 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12546#endif
12547#if 0
12548 || ( pOrgCtx->cs.Sel == 0x10
12549 && ( pOrgCtx->rip == 0x90119e3e
12550 || pOrgCtx->rip == 0x901d9810))
12551#endif
12552#if 0 /* Auto enable DSL - FPU stuff. */
12553 || ( pOrgCtx->cs.Sel == 0x10
12554 && (// pOrgCtx->rip == 0xc02ec07f
12555 //|| pOrgCtx->rip == 0xc02ec082
12556 //|| pOrgCtx->rip == 0xc02ec0c9
12557 0
12558 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12559#endif
12560#if 0 /* Auto enable DSL - fstp st0 stuff. */
12561 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12562#endif
12563#if 0
12564 || pOrgCtx->rip == 0x9022bb3a
12565#endif
12566#if 0
12567 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12568#endif
12569#if 0
12570 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12571 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12572#endif
12573#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12574 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12575 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12576 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12577#endif
12578#if 0 /* NT4SP1 - xadd early boot. */
12579 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12580#endif
12581#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12582 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12583#endif
12584#if 0 /* NT4SP1 - cmpxchg (AMD). */
12585 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12586#endif
12587#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12588 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12589#endif
12590#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12591 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12592
12593#endif
12594#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12595 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12596
12597#endif
12598#if 0 /* NT4SP1 - frstor [ecx] */
12599 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12600#endif
12601#if 0 /* xxxxxx - All long mode code. */
12602 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12603#endif
12604#if 0 /* rep movsq linux 3.7 64-bit boot. */
12605 || (pOrgCtx->rip == 0x0000000000100241)
12606#endif
12607#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12608 || (pOrgCtx->rip == 0x000000000215e240)
12609#endif
12610#if 0 /* DOS's size-overridden iret to v8086. */
12611 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12612#endif
12613 )
12614 )
12615 {
12616 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12617 RTLogFlags(NULL, "enabled");
12618 fNewNoRem = false;
12619 }
12620 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12621 {
12622 pVCpu->iem.s.fNoRem = fNewNoRem;
12623 if (!fNewNoRem)
12624 {
12625 LogAlways(("Enabling verification mode!\n"));
12626 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12627 }
12628 else
12629 LogAlways(("Disabling verification mode!\n"));
12630 }
12631
12632 /*
12633 * Switch state.
12634 */
12635 if (IEM_VERIFICATION_ENABLED(pVCpu))
12636 {
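        /* IEM executes on this private copy of the guest context so that the
           result can later be compared against what the other engine produces. */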
12637 static CPUMCTX s_DebugCtx; /* Ugly! */
12638
12639 s_DebugCtx = *pOrgCtx;
12640 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12641 }
12642
12643 /*
12644 * See if there is an interrupt pending in TRPM and inject it if we can.
12645 */
12646 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12647 if ( pOrgCtx->eflags.Bits.u1IF
12648 && TRPMHasTrap(pVCpu)
12649 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12650 {
12651 uint8_t u8TrapNo;
12652 TRPMEVENT enmType;
12653 RTGCUINT uErrCode;
12654 RTGCPTR uCr2;
12655 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12656 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12657 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12658 TRPMResetTrap(pVCpu);
12659 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12660 }
12661
12662 /*
12663 * Reset the counters.
12664 */
12665 pVCpu->iem.s.cIOReads = 0;
12666 pVCpu->iem.s.cIOWrites = 0;
12667 pVCpu->iem.s.fIgnoreRaxRdx = false;
12668 pVCpu->iem.s.fOverlappingMovs = false;
12669 pVCpu->iem.s.fProblematicMemory = false;
12670 pVCpu->iem.s.fUndefinedEFlags = 0;
12671
12672 if (IEM_VERIFICATION_ENABLED(pVCpu))
12673 {
12674 /*
12675 * Free all verification records.
12676 */
12677 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12678 pVCpu->iem.s.pIemEvtRecHead = NULL;
12679 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12680 do
12681 {
12682 while (pEvtRec)
12683 {
12684 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12685 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12686 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12687 pEvtRec = pNext;
12688 }
12689 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12690 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12691 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12692 } while (pEvtRec);
12693 }
12694}
12695
12696
12697/**
12698 * Allocate an event record.
12699 * @returns Pointer to a record.
12700 */
12701IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12702{
12703 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12704 return NULL;
12705
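    /* Recycle a record from the free list when possible, otherwise allocate a
       fresh one from the MM heap. */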
12706 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12707 if (pEvtRec)
12708 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12709 else
12710 {
12711 if (!pVCpu->iem.s.ppIemEvtRecNext)
12712 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12713
12714 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12715 if (!pEvtRec)
12716 return NULL;
12717 }
12718 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12719 pEvtRec->pNext = NULL;
12720 return pEvtRec;
12721}
12722
12723
12724/**
12725 * IOMMMIORead notification.
12726 */
12727VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12728{
12729 PVMCPU pVCpu = VMMGetCpu(pVM);
12730 if (!pVCpu)
12731 return;
12732 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12733 if (!pEvtRec)
12734 return;
12735 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12736 pEvtRec->u.RamRead.GCPhys = GCPhys;
12737 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12738 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12739 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12740}
12741
12742
12743/**
12744 * IOMMMIOWrite notification.
12745 */
12746VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12747{
12748 PVMCPU pVCpu = VMMGetCpu(pVM);
12749 if (!pVCpu)
12750 return;
12751 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12752 if (!pEvtRec)
12753 return;
12754 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12755 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12756 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12757 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12758 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12759 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12760 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12761 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12762 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12763}
12764
12765
12766/**
12767 * IOMIOPortRead notification.
12768 */
12769VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12770{
12771 PVMCPU pVCpu = VMMGetCpu(pVM);
12772 if (!pVCpu)
12773 return;
12774 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12775 if (!pEvtRec)
12776 return;
12777 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12778 pEvtRec->u.IOPortRead.Port = Port;
12779 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12780 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12781 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12782}
12783
12784/**
12785 * IOMIOPortWrite notification.
12786 */
12787VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12788{
12789 PVMCPU pVCpu = VMMGetCpu(pVM);
12790 if (!pVCpu)
12791 return;
12792 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12793 if (!pEvtRec)
12794 return;
12795 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12796 pEvtRec->u.IOPortWrite.Port = Port;
12797 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12798 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12799 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12800 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12801}
12802
12803
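/**
 * IOMIOPortReadString notification.
 */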
12804VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12805{
12806 PVMCPU pVCpu = VMMGetCpu(pVM);
12807 if (!pVCpu)
12808 return;
12809 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12810 if (!pEvtRec)
12811 return;
12812 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12813 pEvtRec->u.IOPortStrRead.Port = Port;
12814 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12815 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12816 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12817 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12818}
12819
12820
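/**
 * IOMIOPortWriteString notification.
 */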
12821VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12822{
12823 PVMCPU pVCpu = VMMGetCpu(pVM);
12824 if (!pVCpu)
12825 return;
12826 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12827 if (!pEvtRec)
12828 return;
12829 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12830 pEvtRec->u.IOPortStrWrite.Port = Port;
12831 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12832 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12833 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12834 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12835}
12836
12837
12838/**
12839 * Fakes and records an I/O port read.
12840 *
12841 * @returns VINF_SUCCESS.
12842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12843 * @param Port The I/O port.
12844 * @param pu32Value Where to store the fake value.
12845 * @param cbValue The size of the access.
12846 */
12847IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12848{
12849 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12850 if (pEvtRec)
12851 {
12852 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12853 pEvtRec->u.IOPortRead.Port = Port;
12854 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12855 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12856 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12857 }
12858 pVCpu->iem.s.cIOReads++;
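    /* A recognizable filler value; iemVerifyWriteRecord relies on the 0xcc
       pattern to fend off INS stores when comparing memory. */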
12859 *pu32Value = 0xcccccccc;
12860 return VINF_SUCCESS;
12861}
12862
12863
12864/**
12865 * Fakes and records an I/O port write.
12866 *
12867 * @returns VINF_SUCCESS.
12868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12869 * @param Port The I/O port.
12870 * @param u32Value The value being written.
12871 * @param cbValue The size of the access.
12872 */
12873IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12874{
12875 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12876 if (pEvtRec)
12877 {
12878 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12879 pEvtRec->u.IOPortWrite.Port = Port;
12880 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12881 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12882 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12883 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12884 }
12885 pVCpu->iem.s.cIOWrites++;
12886 return VINF_SUCCESS;
12887}
12888
12889
12890/**
12891 * Adds extra details (register state and instruction disassembly) to the assertion info.
12892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12893 */
12894IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12895{
12896 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12897 PVM pVM = pVCpu->CTX_SUFF(pVM);
12898
12899 char szRegs[4096];
12900 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12901 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12902 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12903 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12904 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12905 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12906 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12907 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12908 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12909 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12910 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12911 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12912 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12913 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12914 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12915 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12916 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12917 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12918 " efer=%016VR{efer}\n"
12919 " pat=%016VR{pat}\n"
12920 " sf_mask=%016VR{sf_mask}\n"
12921 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12922 " lstar=%016VR{lstar}\n"
12923 " star=%016VR{star} cstar=%016VR{cstar}\n"
12924 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12925 );
12926
12927 char szInstr1[256];
12928 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12929 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12930 szInstr1, sizeof(szInstr1), NULL);
12931 char szInstr2[256];
12932 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12933 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12934 szInstr2, sizeof(szInstr2), NULL);
12935
12936 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12937}
12938
12939
12940/**
12941 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12942 * dump to the assertion info.
12943 *
12944 * @param pEvtRec The record to dump.
12945 */
12946IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12947{
12948 switch (pEvtRec->enmEvent)
12949 {
12950 case IEMVERIFYEVENT_IOPORT_READ:
12951 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12952 pEvtRec->u.IOPortRead.Port,
12953 pEvtRec->u.IOPortRead.cbValue);
12954 break;
12955 case IEMVERIFYEVENT_IOPORT_WRITE:
12956 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12957 pEvtRec->u.IOPortWrite.Port,
12958 pEvtRec->u.IOPortWrite.cbValue,
12959 pEvtRec->u.IOPortWrite.u32Value);
12960 break;
12961 case IEMVERIFYEVENT_IOPORT_STR_READ:
12962 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
12963 pEvtRec->u.IOPortStrRead.Port,
12964 pEvtRec->u.IOPortStrRead.cbValue,
12965 pEvtRec->u.IOPortStrRead.cTransfers);
12966 break;
12967 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12968 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12969 pEvtRec->u.IOPortStrWrite.Port,
12970 pEvtRec->u.IOPortStrWrite.cbValue,
12971 pEvtRec->u.IOPortStrWrite.cTransfers);
12972 break;
12973 case IEMVERIFYEVENT_RAM_READ:
12974 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
12975 pEvtRec->u.RamRead.GCPhys,
12976 pEvtRec->u.RamRead.cb);
12977 break;
12978 case IEMVERIFYEVENT_RAM_WRITE:
12979 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
12980 pEvtRec->u.RamWrite.GCPhys,
12981 pEvtRec->u.RamWrite.cb,
12982 (int)pEvtRec->u.RamWrite.cb,
12983 pEvtRec->u.RamWrite.ab);
12984 break;
12985 default:
12986 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
12987 break;
12988 }
12989}
12990
12991
12992/**
12993 * Raises an assertion on the specified records, showing the given message with
12994 * dumps of both records attached.
12995 *
12996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12997 * @param pEvtRec1 The first record.
12998 * @param pEvtRec2 The second record.
12999 * @param pszMsg The message explaining why we're asserting.
13000 */
13001IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13002{
13003 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13004 iemVerifyAssertAddRecordDump(pEvtRec1);
13005 iemVerifyAssertAddRecordDump(pEvtRec2);
13006 iemVerifyAssertMsg2(pVCpu);
13007 RTAssertPanic();
13008}
13009
13010
13011/**
13012 * Raises an assertion on the specified record, showing the given message with
13013 * a record dump attached.
13014 *
13015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13016 * @param pEvtRec1 The first record.
13017 * @param pEvtRec The record.
13018 */
13019IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13020{
13021 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13022 iemVerifyAssertAddRecordDump(pEvtRec);
13023 iemVerifyAssertMsg2(pVCpu);
13024 RTAssertPanic();
13025}
13026
13027
13028/**
13029 * Verifies a write record.
13030 *
13031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13032 * @param pEvtRec The write record.
13033 * @param fRem Set if REM did the other execution. If clear,
13034 * it was HM.
13035 */
13036IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13037{
13038 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13039 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13040 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13041 if ( RT_FAILURE(rc)
13042 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13043 {
13044 /* fend off ins: the faked I/O port read returns 0xcc filler bytes, so INS stores are expected to differ. */
13045 if ( !pVCpu->iem.s.cIOReads
13046 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13047 || ( pEvtRec->u.RamWrite.cb != 1
13048 && pEvtRec->u.RamWrite.cb != 2
13049 && pEvtRec->u.RamWrite.cb != 4) )
13050 {
13051 /* fend off ROMs and MMIO */
13052 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13053 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13054 {
13055 /* fend off fxsave */
13056 if (pEvtRec->u.RamWrite.cb != 512)
13057 {
13058 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13059 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13060 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13061 RTAssertMsg2Add("%s: %.*Rhxs\n"
13062 "iem: %.*Rhxs\n",
13063 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13064 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13065 iemVerifyAssertAddRecordDump(pEvtRec);
13066 iemVerifyAssertMsg2(pVCpu);
13067 RTAssertPanic();
13068 }
13069 }
13070 }
13071 }
13072
13073}
13074
13075/**
13076 * Performs the post-execution verification checks.
13077 */
13078IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13079{
13080 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13081 return rcStrictIem;
13082
13083 /*
13084 * Switch back the state.
13085 */
13086 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13087 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13088 Assert(pOrgCtx != pDebugCtx);
13089 IEM_GET_CTX(pVCpu) = pOrgCtx;
13090
13091 /*
13092 * Execute the instruction in REM.
13093 */
13094 bool fRem = false;
13095 PVM pVM = pVCpu->CTX_SUFF(pVM);
13096
13097 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13098#ifdef IEM_VERIFICATION_MODE_FULL_HM
13099 if ( HMIsEnabled(pVM)
13100 && pVCpu->iem.s.cIOReads == 0
13101 && pVCpu->iem.s.cIOWrites == 0
13102 && !pVCpu->iem.s.fProblematicMemory)
13103 {
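        /* Prefer single stepping the instruction with HM when no I/O was involved
           and the memory wasn't flagged as problematic; otherwise fall back to
           REM below. */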
13104 uint64_t uStartRip = pOrgCtx->rip;
13105 unsigned iLoops = 0;
13106 do
13107 {
13108 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13109 iLoops++;
13110 } while ( rc == VINF_SUCCESS
13111 || ( rc == VINF_EM_DBG_STEPPED
13112 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13113 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13114 || ( pOrgCtx->rip != pDebugCtx->rip
13115 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13116 && iLoops < 8) );
13117 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13118 rc = VINF_SUCCESS;
13119 }
13120#endif
13121 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13122 || rc == VINF_IOM_R3_IOPORT_READ
13123 || rc == VINF_IOM_R3_IOPORT_WRITE
13124 || rc == VINF_IOM_R3_MMIO_READ
13125 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13126 || rc == VINF_IOM_R3_MMIO_WRITE
13127 || rc == VINF_CPUM_R3_MSR_READ
13128 || rc == VINF_CPUM_R3_MSR_WRITE
13129 || rc == VINF_EM_RESCHEDULE
13130 )
13131 {
13132 EMRemLock(pVM);
13133 rc = REMR3EmulateInstruction(pVM, pVCpu);
13134 AssertRC(rc);
13135 EMRemUnlock(pVM);
13136 fRem = true;
13137 }
13138
13139# if 1 /* Skip unimplemented instructions for now. */
13140 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13141 {
13142 IEM_GET_CTX(pVCpu) = pOrgCtx;
13143 if (rc == VINF_EM_DBG_STEPPED)
13144 return VINF_SUCCESS;
13145 return rc;
13146 }
13147# endif
13148
13149 /*
13150 * Compare the register states.
13151 */
13152 unsigned cDiffs = 0;
13153 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13154 {
13155 //Log(("REM and IEM end up with different registers!\n"));
13156 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13157
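        /* The CHECK_* macros below compare a single field in the other engine's
           context (pOrgCtx/pOrgXState) against IEM's debug context, log any
           difference and count it in cDiffs. */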
13158# define CHECK_FIELD(a_Field) \
13159 do \
13160 { \
13161 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13162 { \
13163 switch (sizeof(pOrgCtx->a_Field)) \
13164 { \
13165 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13166 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13167 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13168 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13169 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13170 } \
13171 cDiffs++; \
13172 } \
13173 } while (0)
13174# define CHECK_XSTATE_FIELD(a_Field) \
13175 do \
13176 { \
13177 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13178 { \
13179 switch (sizeof(pOrgXState->a_Field)) \
13180 { \
13181 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13182 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13183 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13184 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13185 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13186 } \
13187 cDiffs++; \
13188 } \
13189 } while (0)
13190
13191# define CHECK_BIT_FIELD(a_Field) \
13192 do \
13193 { \
13194 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13195 { \
13196 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13197 cDiffs++; \
13198 } \
13199 } while (0)
13200
13201# define CHECK_SEL(a_Sel) \
13202 do \
13203 { \
13204 CHECK_FIELD(a_Sel.Sel); \
13205 CHECK_FIELD(a_Sel.Attr.u); \
13206 CHECK_FIELD(a_Sel.u64Base); \
13207 CHECK_FIELD(a_Sel.u32Limit); \
13208 CHECK_FIELD(a_Sel.fFlags); \
13209 } while (0)
13210
13211 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13212 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13213
13214#if 1 /* The recompiler doesn't update these the intel way. */
13215 if (fRem)
13216 {
13217 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13218 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13219 pOrgXState->x87.CS = pDebugXState->x87.CS;
13220 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13221 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13222 pOrgXState->x87.DS = pDebugXState->x87.DS;
13223 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13224 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13225 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13226 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13227 }
13228#endif
13229 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13230 {
13231 RTAssertMsg2Weak(" the FPU state differs\n");
13232 cDiffs++;
13233 CHECK_XSTATE_FIELD(x87.FCW);
13234 CHECK_XSTATE_FIELD(x87.FSW);
13235 CHECK_XSTATE_FIELD(x87.FTW);
13236 CHECK_XSTATE_FIELD(x87.FOP);
13237 CHECK_XSTATE_FIELD(x87.FPUIP);
13238 CHECK_XSTATE_FIELD(x87.CS);
13239 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13240 CHECK_XSTATE_FIELD(x87.FPUDP);
13241 CHECK_XSTATE_FIELD(x87.DS);
13242 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13243 CHECK_XSTATE_FIELD(x87.MXCSR);
13244 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13245 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13246 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13247 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13248 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13249 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13250 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13251 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13252 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13253 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13254 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13255 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13256 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13257 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13258 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13259 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13260 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13261 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13262 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13263 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13264 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13265 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13266 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13267 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13268 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13269 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13270 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13271 }
13272 CHECK_FIELD(rip);
13273 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13274 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13275 {
13276 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13277 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13278 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13279 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13280 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13281 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13282 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13283 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13284 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13285 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13286 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13287 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13288 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13289 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13290 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13291 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13292 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13293 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13294 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13295 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13296 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13297 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13298 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13299 }
13300
13301 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13302 CHECK_FIELD(rax);
13303 CHECK_FIELD(rcx);
13304 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13305 CHECK_FIELD(rdx);
13306 CHECK_FIELD(rbx);
13307 CHECK_FIELD(rsp);
13308 CHECK_FIELD(rbp);
13309 CHECK_FIELD(rsi);
13310 CHECK_FIELD(rdi);
13311 CHECK_FIELD(r8);
13312 CHECK_FIELD(r9);
13313 CHECK_FIELD(r10);
13314 CHECK_FIELD(r11);
13315 CHECK_FIELD(r12);
13316 CHECK_FIELD(r13);
13317 CHECK_SEL(cs);
13318 CHECK_SEL(ss);
13319 CHECK_SEL(ds);
13320 CHECK_SEL(es);
13321 CHECK_SEL(fs);
13322 CHECK_SEL(gs);
13323 CHECK_FIELD(cr0);
13324
13325 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13326 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13327 /* Kludge #2: CR2 differs slightly on cross-page boundary faults; we report the last address of the access
13328 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13329 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13330 {
13331 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13332 { /* ignore */ }
13333 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13334 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13335 && fRem)
13336 { /* ignore */ }
13337 else
13338 CHECK_FIELD(cr2);
13339 }
13340 CHECK_FIELD(cr3);
13341 CHECK_FIELD(cr4);
13342 CHECK_FIELD(dr[0]);
13343 CHECK_FIELD(dr[1]);
13344 CHECK_FIELD(dr[2]);
13345 CHECK_FIELD(dr[3]);
13346 CHECK_FIELD(dr[6]);
13347 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13348 CHECK_FIELD(dr[7]);
13349 CHECK_FIELD(gdtr.cbGdt);
13350 CHECK_FIELD(gdtr.pGdt);
13351 CHECK_FIELD(idtr.cbIdt);
13352 CHECK_FIELD(idtr.pIdt);
13353 CHECK_SEL(ldtr);
13354 CHECK_SEL(tr);
13355 CHECK_FIELD(SysEnter.cs);
13356 CHECK_FIELD(SysEnter.eip);
13357 CHECK_FIELD(SysEnter.esp);
13358 CHECK_FIELD(msrEFER);
13359 CHECK_FIELD(msrSTAR);
13360 CHECK_FIELD(msrPAT);
13361 CHECK_FIELD(msrLSTAR);
13362 CHECK_FIELD(msrCSTAR);
13363 CHECK_FIELD(msrSFMASK);
13364 CHECK_FIELD(msrKERNELGSBASE);
13365
13366 if (cDiffs != 0)
13367 {
13368 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13369 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13370 RTAssertPanic();
13371 static bool volatile s_fEnterDebugger = true;
13372 if (s_fEnterDebugger)
13373 DBGFSTOP(pVM);
13374
13375# if 1 /* Ignore unimplemented instructions for now. */
13376 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13377 rcStrictIem = VINF_SUCCESS;
13378# endif
13379 }
13380# undef CHECK_FIELD
13381# undef CHECK_BIT_FIELD
13382 }
13383
13384 /*
13385 * If the register state compared fine, check the verification event
13386 * records.
13387 */
13388 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13389 {
13390 /*
13391 * Compare verification event records.
13392 * - I/O port accesses should be a 1:1 match.
13393 */
13394 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13395 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13396 while (pIemRec && pOtherRec)
13397 {
13398 /* Since we might miss RAM writes and reads, ignore reads and verify
13399 any extra IEM write records against guest memory before skipping them. */
13400 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13401 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13402 && pIemRec->pNext)
13403 {
13404 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13405 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13406 pIemRec = pIemRec->pNext;
13407 }
13408
13409 /* Do the compare. */
13410 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13411 {
13412 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13413 break;
13414 }
13415 bool fEquals;
13416 switch (pIemRec->enmEvent)
13417 {
13418 case IEMVERIFYEVENT_IOPORT_READ:
13419 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13420 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13421 break;
13422 case IEMVERIFYEVENT_IOPORT_WRITE:
13423 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13424 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13425 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13426 break;
13427 case IEMVERIFYEVENT_IOPORT_STR_READ:
13428 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13429 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13430 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13431 break;
13432 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13433 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13434 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13435 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13436 break;
13437 case IEMVERIFYEVENT_RAM_READ:
13438 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13439 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13440 break;
13441 case IEMVERIFYEVENT_RAM_WRITE:
13442 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13443 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13444 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13445 break;
13446 default:
13447 fEquals = false;
13448 break;
13449 }
13450 if (!fEquals)
13451 {
13452 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13453 break;
13454 }
13455
13456 /* advance */
13457 pIemRec = pIemRec->pNext;
13458 pOtherRec = pOtherRec->pNext;
13459 }
13460
13461 /* Ignore extra writes and reads. */
13462 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13463 {
13464 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13465 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13466 pIemRec = pIemRec->pNext;
13467 }
13468 if (pIemRec != NULL)
13469 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13470 else if (pOtherRec != NULL)
13471 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13472 }
13473 IEM_GET_CTX(pVCpu) = pOrgCtx;
13474
13475 return rcStrictIem;
13476}
13477
13478#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13479
13480/* stubs */
13481IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13482{
13483 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13484 return VERR_INTERNAL_ERROR;
13485}
13486
13487IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13488{
13489 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13490 return VERR_INTERNAL_ERROR;
13491}
13492
13493#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13494
13495
13496#ifdef LOG_ENABLED
13497/**
13498 * Logs the current instruction.
13499 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13500 * @param pCtx The current CPU context.
13501 * @param fSameCtx Set if we have the same context information as the VMM,
13502 * clear if we may have already executed an instruction in
13503 * our debug context. When clear, we assume IEMCPU holds
13504 * valid CPU mode info.
13505 */
13506IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13507{
13508# ifdef IN_RING3
13509 if (LogIs2Enabled())
13510 {
13511 char szInstr[256];
13512 uint32_t cbInstr = 0;
13513 if (fSameCtx)
13514 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13515 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13516 szInstr, sizeof(szInstr), &cbInstr);
13517 else
13518 {
13519 uint32_t fFlags = 0;
13520 switch (pVCpu->iem.s.enmCpuMode)
13521 {
13522 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13523 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13524 case IEMMODE_16BIT:
13525 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13526 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13527 else
13528 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13529 break;
13530 }
13531 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13532 szInstr, sizeof(szInstr), &cbInstr);
13533 }
13534
13535 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13536 Log2(("****\n"
13537 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13538 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13539 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13540 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13541 " %s\n"
13542 ,
13543 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13544 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13545 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13546 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13547 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13548 szInstr));
13549
13550 if (LogIs3Enabled())
13551 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13552 }
13553 else
13554# endif
13555 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13556 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13557 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13558}
13559#endif
13560
13561
13562/**
13563 * Makes status code adjustments (pass up from I/O and access handler)
13564 * as well as maintaining statistics.
13565 *
13566 * @returns Strict VBox status code to pass up.
13567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13568 * @param rcStrict The status from executing an instruction.
13569 */
13570DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13571{
13572 if (rcStrict != VINF_SUCCESS)
13573 {
13574 if (RT_SUCCESS(rcStrict))
13575 {
13576 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13577 || rcStrict == VINF_IOM_R3_IOPORT_READ
13578 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13579 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13580 || rcStrict == VINF_IOM_R3_MMIO_READ
13581 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13582 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13583 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13584 || rcStrict == VINF_CPUM_R3_MSR_READ
13585 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13586 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13587 || rcStrict == VINF_EM_RAW_TO_R3
13588 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13589 /* raw-mode / virt handlers only: */
13590 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13591 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13592 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13593 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13594 || rcStrict == VINF_SELM_SYNC_GDT
13595 || rcStrict == VINF_CSAM_PENDING_ACTION
13596 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13597 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13598/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
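            /* A pending pass-up status replaces the informational status when it
               lies outside the VINF_EM range or is numerically lower than the
               current status (lower values being treated as higher priority here). */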
13599 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13600 if (rcPassUp == VINF_SUCCESS)
13601 pVCpu->iem.s.cRetInfStatuses++;
13602 else if ( rcPassUp < VINF_EM_FIRST
13603 || rcPassUp > VINF_EM_LAST
13604 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13605 {
13606 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13607 pVCpu->iem.s.cRetPassUpStatus++;
13608 rcStrict = rcPassUp;
13609 }
13610 else
13611 {
13612 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13613 pVCpu->iem.s.cRetInfStatuses++;
13614 }
13615 }
13616 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13617 pVCpu->iem.s.cRetAspectNotImplemented++;
13618 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13619 pVCpu->iem.s.cRetInstrNotImplemented++;
13620#ifdef IEM_VERIFICATION_MODE_FULL
13621 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13622 rcStrict = VINF_SUCCESS;
13623#endif
13624 else
13625 pVCpu->iem.s.cRetErrStatuses++;
13626 }
13627 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13628 {
13629 pVCpu->iem.s.cRetPassUpStatus++;
13630 rcStrict = pVCpu->iem.s.rcPassUp;
13631 }
13632
13633 return rcStrict;
13634}
13635
13636
13637/**
13638 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13639 * IEMExecOneWithPrefetchedByPC.
13640 *
13641 * Similar code is found in IEMExecLots.
13642 *
13643 * @return Strict VBox status code.
13644 * @param pVCpu The cross context virtual CPU structure of the
13645 * calling EMT.
13646 * @param fExecuteInhibit If set, execute the instruction following CLI,
13647 * POP SS and MOV SS,GR.
13648 */
13649DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13650{
13651#ifdef IEM_WITH_SETJMP
13652 VBOXSTRICTRC rcStrict;
13653 jmp_buf JmpBuf;
13654 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13655 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13656 if ((rcStrict = setjmp(JmpBuf)) == 0)
13657 {
13658 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13659 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13660 }
13661 else
13662 pVCpu->iem.s.cLongJumps++;
13663 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13664#else
13665 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13666 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13667#endif
13668 if (rcStrict == VINF_SUCCESS)
13669 pVCpu->iem.s.cInstructions++;
13670 if (pVCpu->iem.s.cActiveMappings > 0)
13671 {
13672 Assert(rcStrict != VINF_SUCCESS);
13673 iemMemRollback(pVCpu);
13674 }
13675//#ifdef DEBUG
13676// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13677//#endif
13678
13679 /* Execute the next instruction as well if a cli, pop ss or
13680 mov ss, Gr has just completed successfully. */
13681 if ( fExecuteInhibit
13682 && rcStrict == VINF_SUCCESS
13683 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13684 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13685 {
13686 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13687 if (rcStrict == VINF_SUCCESS)
13688 {
13689#ifdef LOG_ENABLED
13690 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13691#endif
13692#ifdef IEM_WITH_SETJMP
13693 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13694 if ((rcStrict = setjmp(JmpBuf)) == 0)
13695 {
13696 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13697 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13698 }
13699 else
13700 pVCpu->iem.s.cLongJumps++;
13701 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13702#else
13703 IEM_OPCODE_GET_NEXT_U8(&b);
13704 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13705#endif
13706 if (rcStrict == VINF_SUCCESS)
13707 pVCpu->iem.s.cInstructions++;
13708 if (pVCpu->iem.s.cActiveMappings > 0)
13709 {
13710 Assert(rcStrict != VINF_SUCCESS);
13711 iemMemRollback(pVCpu);
13712 }
13713 }
13714 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13715 }
13716
13717 /*
13718 * Return value fiddling, statistics and sanity assertions.
13719 */
13720 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13721
13722 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13723 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13724#if defined(IEM_VERIFICATION_MODE_FULL)
13725 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13726 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13727 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13728 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13729#endif
13730 return rcStrict;
13731}
13732
13733
13734#ifdef IN_RC
13735/**
13736 * Re-enters raw-mode or ensures we return to ring-3.
13737 *
13738 * @returns rcStrict, maybe modified.
13739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13740 * @param pCtx The current CPU context.
13741 * @param rcStrict The status code returned by the interpreter.
13742 */
13743DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13744{
13745 if ( !pVCpu->iem.s.fInPatchCode
13746 && ( rcStrict == VINF_SUCCESS
13747 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13748 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13749 {
13750 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13751 CPUMRawEnter(pVCpu);
13752 else
13753 {
13754 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13755 rcStrict = VINF_EM_RESCHEDULE;
13756 }
13757 }
13758 return rcStrict;
13759}
13760#endif
13761
13762
13763/**
13764 * Execute one instruction.
13765 *
13766 * @return Strict VBox status code.
13767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13768 */
13769VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13770{
13771#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13772 if (++pVCpu->iem.s.cVerifyDepth == 1)
13773 iemExecVerificationModeSetup(pVCpu);
13774#endif
13775#ifdef LOG_ENABLED
13776 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13777 iemLogCurInstr(pVCpu, pCtx, true);
13778#endif
13779
13780 /*
13781 * Do the decoding and emulation.
13782 */
13783 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13784 if (rcStrict == VINF_SUCCESS)
13785 rcStrict = iemExecOneInner(pVCpu, true);
13786
13787#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13788 /*
13789 * Assert some sanity.
13790 */
13791 if (pVCpu->iem.s.cVerifyDepth == 1)
13792 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13793 pVCpu->iem.s.cVerifyDepth--;
13794#endif
13795#ifdef IN_RC
13796 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13797#endif
13798 if (rcStrict != VINF_SUCCESS)
13799 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13800 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13801 return rcStrict;
13802}
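#if 0 /* Illustrative sketch (not built): a minimal caller of IEMExecOne. The
       * helper name emR3InterpretOneWithIem and the fallback strategy are
       * hypothetical; the status codes checked are the ones handled by
       * iemExecStatusCodeFiddling above. */
static VBOXSTRICTRC emR3InterpretOneWithIem(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (   rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
    {
        /* IEM cannot handle this instruction (yet); let the caller pick
           another strategy (recompiler, scheduling change, ...). */
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    }
    return rcStrict;
}
#endif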
13803
13804
13805VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13806{
13807 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13808 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13809
13810 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13811 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13812 if (rcStrict == VINF_SUCCESS)
13813 {
13814 rcStrict = iemExecOneInner(pVCpu, true);
13815 if (pcbWritten)
13816 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13817 }
13818
13819#ifdef IN_RC
13820 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13821#endif
13822 return rcStrict;
13823}
13824
13825
13826VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13827 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13828{
13829 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13830 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13831
13832 VBOXSTRICTRC rcStrict;
13833 if ( cbOpcodeBytes
13834 && pCtx->rip == OpcodeBytesPC)
13835 {
13836 iemInitDecoder(pVCpu, false);
13837#ifdef IEM_WITH_CODE_TLB
13838 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13839 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13840 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13841 pVCpu->iem.s.offCurInstrStart = 0;
13842 pVCpu->iem.s.offInstrNextByte = 0;
13843#else
13844 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13845 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13846#endif
13847 rcStrict = VINF_SUCCESS;
13848 }
13849 else
13850 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13851 if (rcStrict == VINF_SUCCESS)
13852 {
13853 rcStrict = iemExecOneInner(pVCpu, true);
13854 }
13855
13856#ifdef IN_RC
13857 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13858#endif
13859 return rcStrict;
13860}
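#if 0 /* Illustrative sketch (not built): feeding already fetched opcode bytes
       * to IEMExecOneWithPrefetchedByPC. The pabInstr buffer and cbInstr value
       * are assumed to come from the caller (e.g. an exit-info structure); the
       * wrapper name is hypothetical. */
static VBOXSTRICTRC myExecWithPrefetchedBytes(PVMCPU pVCpu, uint8_t const *pabInstr, size_t cbInstr)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    /* The bytes are only used if they really start at the current RIP;
       otherwise the call falls back to a normal opcode prefetch. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, pabInstr, cbInstr);
}
#endif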
13861
13862
13863VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13864{
13865 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13866 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13867
13868 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13869 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13870 if (rcStrict == VINF_SUCCESS)
13871 {
13872 rcStrict = iemExecOneInner(pVCpu, false);
13873 if (pcbWritten)
13874 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13875 }
13876
13877#ifdef IN_RC
13878 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13879#endif
13880 return rcStrict;
13881}
13882
13883
13884VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13885 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13886{
13887 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13888 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13889
13890 VBOXSTRICTRC rcStrict;
13891 if ( cbOpcodeBytes
13892 && pCtx->rip == OpcodeBytesPC)
13893 {
13894 iemInitDecoder(pVCpu, true);
13895#ifdef IEM_WITH_CODE_TLB
13896 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13897 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13898 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13899 pVCpu->iem.s.offCurInstrStart = 0;
13900 pVCpu->iem.s.offInstrNextByte = 0;
13901#else
13902 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13903 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13904#endif
13905 rcStrict = VINF_SUCCESS;
13906 }
13907 else
13908 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13909 if (rcStrict == VINF_SUCCESS)
13910 rcStrict = iemExecOneInner(pVCpu, false);
13911
13912#ifdef IN_RC
13913 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13914#endif
13915 return rcStrict;
13916}
13917
13918
13919/**
13920 * For debugging DISGetParamSize; may come in handy.
13921 *
13922 * @returns Strict VBox status code.
13923 * @param pVCpu The cross context virtual CPU structure of the
13924 * calling EMT.
13925 * @param pCtxCore The context core structure.
13926 * @param OpcodeBytesPC The PC of the opcode bytes.
13927 * @param pvOpcodeBytes Prefetched opcode bytes.
13928 * @param cbOpcodeBytes Number of prefetched bytes.
13929 * @param pcbWritten Where to return the number of bytes written.
13930 * Optional.
13931 */
13932VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13933 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13934 uint32_t *pcbWritten)
13935{
13936 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13937 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13938
13939 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13940 VBOXSTRICTRC rcStrict;
13941 if ( cbOpcodeBytes
13942 && pCtx->rip == OpcodeBytesPC)
13943 {
13944 iemInitDecoder(pVCpu, true);
13945#ifdef IEM_WITH_CODE_TLB
13946 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13947 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13948 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13949 pVCpu->iem.s.offCurInstrStart = 0;
13950 pVCpu->iem.s.offInstrNextByte = 0;
13951#else
13952 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13953 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13954#endif
13955 rcStrict = VINF_SUCCESS;
13956 }
13957 else
13958 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13959 if (rcStrict == VINF_SUCCESS)
13960 {
13961 rcStrict = iemExecOneInner(pVCpu, false);
13962 if (pcbWritten)
13963 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13964 }
13965
13966#ifdef IN_RC
13967 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13968#endif
13969 return rcStrict;
13970}
13971
13972
13973VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
13974{
13975 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
13976
13977#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13978 /*
13979 * See if there is an interrupt pending in TRPM, inject it if we can.
13980 */
13981 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13982# ifdef IEM_VERIFICATION_MODE_FULL
13983 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13984# endif
13985 if ( pCtx->eflags.Bits.u1IF
13986 && TRPMHasTrap(pVCpu)
13987 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13988 {
13989 uint8_t u8TrapNo;
13990 TRPMEVENT enmType;
13991 RTGCUINT uErrCode;
13992 RTGCPTR uCr2;
13993 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13994 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13995 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13996 TRPMResetTrap(pVCpu);
13997 }
13998
13999 /*
14000 * Log the state.
14001 */
14002# ifdef LOG_ENABLED
14003 iemLogCurInstr(pVCpu, pCtx, true);
14004# endif
14005
14006 /*
14007 * Do the decoding and emulation.
14008 */
14009 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14010 if (rcStrict == VINF_SUCCESS)
14011 rcStrict = iemExecOneInner(pVCpu, true);
14012
14013 /*
14014 * Assert some sanity.
14015 */
14016 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14017
14018 /*
14019 * Log and return.
14020 */
14021 if (rcStrict != VINF_SUCCESS)
14022 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14023 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14024 if (pcInstructions)
14025 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14026 return rcStrict;
14027
14028#else /* Not verification mode */
14029
14030 /*
14031 * See if there is an interrupt pending in TRPM, inject it if we can.
14032 */
14033 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14034# ifdef IEM_VERIFICATION_MODE_FULL
14035 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14036# endif
14037 if ( pCtx->eflags.Bits.u1IF
14038 && TRPMHasTrap(pVCpu)
14039 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14040 {
14041 uint8_t u8TrapNo;
14042 TRPMEVENT enmType;
14043 RTGCUINT uErrCode;
14044 RTGCPTR uCr2;
14045 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14046 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14047 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14048 TRPMResetTrap(pVCpu);
14049 }
14050
14051 /*
14052 * Initial decoder init w/ prefetch, then setup setjmp.
14053 */
14054 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14055 if (rcStrict == VINF_SUCCESS)
14056 {
14057# ifdef IEM_WITH_SETJMP
14058 jmp_buf JmpBuf;
14059 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14060 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14061 pVCpu->iem.s.cActiveMappings = 0;
14062 if ((rcStrict = setjmp(JmpBuf)) == 0)
14063# endif
14064 {
14065 /*
14066 * The run loop. We limit ourselves to 4096 instructions right now.
14067 */
14068 PVM pVM = pVCpu->CTX_SUFF(pVM);
14069 uint32_t cInstr = 4096;
14070 for (;;)
14071 {
14072 /*
14073 * Log the state.
14074 */
14075# ifdef LOG_ENABLED
14076 iemLogCurInstr(pVCpu, pCtx, true);
14077# endif
14078
14079 /*
14080 * Do the decoding and emulation.
14081 */
14082 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14083 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14084 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14085 {
14086 Assert(pVCpu->iem.s.cActiveMappings == 0);
14087 pVCpu->iem.s.cInstructions++;
14088 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14089 {
14090 uint32_t fCpu = pVCpu->fLocalForcedActions
14091 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14092 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14093 | VMCPU_FF_TLB_FLUSH
14094# ifdef VBOX_WITH_RAW_MODE
14095 | VMCPU_FF_TRPM_SYNC_IDT
14096 | VMCPU_FF_SELM_SYNC_TSS
14097 | VMCPU_FF_SELM_SYNC_GDT
14098 | VMCPU_FF_SELM_SYNC_LDT
14099# endif
14100 | VMCPU_FF_INHIBIT_INTERRUPTS
14101 | VMCPU_FF_BLOCK_NMIS ));
14102
14103 if (RT_LIKELY( ( !fCpu
14104 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14105 && !pCtx->rflags.Bits.u1IF) )
14106 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14107 {
14108 if (cInstr-- > 0)
14109 {
14110 Assert(pVCpu->iem.s.cActiveMappings == 0);
14111 iemReInitDecoder(pVCpu);
14112 continue;
14113 }
14114 }
14115 }
14116 Assert(pVCpu->iem.s.cActiveMappings == 0);
14117 }
14118 else if (pVCpu->iem.s.cActiveMappings > 0)
14119 iemMemRollback(pVCpu);
14120 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14121 break;
14122 }
14123 }
14124# ifdef IEM_WITH_SETJMP
14125 else
14126 {
14127 if (pVCpu->iem.s.cActiveMappings > 0)
14128 iemMemRollback(pVCpu);
14129 pVCpu->iem.s.cLongJumps++;
14130 }
14131 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14132# endif
14133
14134 /*
14135 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14136 */
14137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14139# if defined(IEM_VERIFICATION_MODE_FULL)
14140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14144# endif
14145 }
14146
14147 /*
14148 * Maybe re-enter raw-mode and log.
14149 */
14150# ifdef IN_RC
14151 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14152# endif
14153 if (rcStrict != VINF_SUCCESS)
14154 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14155 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14156 if (pcInstructions)
14157 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14158 return rcStrict;
14159#endif /* Not verification mode */
14160}
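#if 0 /* Illustrative sketch (not built): a minimal IEMExecLots caller. The
       * loop and the name myIemExecLoop are hypothetical; scheduling and
       * ring-3 statuses are simply handed back to the caller. */
static VBOXSTRICTRC myIemExecLoop(PVMCPU pVCpu)
{
    for (;;)
    {
        uint32_t     cInstructions = 0;
        VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
        Log(("myIemExecLoop: %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
        if (rcStrict != VINF_SUCCESS)
            return rcStrict; /* Let the caller deal with scheduling / ring-3 statuses. */
    }
}
#endif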
14161
14162
14163
14164/**
14165 * Injects a trap, fault, abort, software interrupt or external interrupt.
14166 *
14167 * The parameter list matches TRPMQueryTrapAll pretty closely.
14168 *
14169 * @returns Strict VBox status code.
14170 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14171 * @param u8TrapNo The trap number.
14172 * @param enmType What type is it (trap/fault/abort), software
14173 * interrupt or hardware interrupt.
14174 * @param uErrCode The error code if applicable.
14175 * @param uCr2 The CR2 value if applicable.
14176 * @param cbInstr The instruction length (only relevant for
14177 * software interrupts).
14178 */
14179VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14180 uint8_t cbInstr)
14181{
14182 iemInitDecoder(pVCpu, false);
14183#ifdef DBGFTRACE_ENABLED
14184 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14185 u8TrapNo, enmType, uErrCode, uCr2);
14186#endif
14187
14188 uint32_t fFlags;
14189 switch (enmType)
14190 {
14191 case TRPM_HARDWARE_INT:
14192 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14193 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14194 uErrCode = uCr2 = 0;
14195 break;
14196
14197 case TRPM_SOFTWARE_INT:
14198 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14199 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14200 uErrCode = uCr2 = 0;
14201 break;
14202
14203 case TRPM_TRAP:
14204 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14205 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14206 if (u8TrapNo == X86_XCPT_PF)
14207 fFlags |= IEM_XCPT_FLAGS_CR2;
14208 switch (u8TrapNo)
14209 {
14210 case X86_XCPT_DF:
14211 case X86_XCPT_TS:
14212 case X86_XCPT_NP:
14213 case X86_XCPT_SS:
14214 case X86_XCPT_PF:
14215 case X86_XCPT_AC:
14216 fFlags |= IEM_XCPT_FLAGS_ERR;
14217 break;
14218
14219 case X86_XCPT_NMI:
14220 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14221 break;
14222 }
14223 break;
14224
14225 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14226 }
14227
14228 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14229}
14230
14231
14232/**
14233 * Injects the active TRPM event.
14234 *
14235 * @returns Strict VBox status code.
14236 * @param pVCpu The cross context virtual CPU structure.
14237 */
14238VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14239{
14240#ifndef IEM_IMPLEMENTS_TASKSWITCH
14241 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14242#else
14243 uint8_t u8TrapNo;
14244 TRPMEVENT enmType;
14245 RTGCUINT uErrCode;
14246 RTGCUINTPTR uCr2;
14247 uint8_t cbInstr;
14248 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14249 if (RT_FAILURE(rc))
14250 return rc;
14251
14252 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14253
14254 /** @todo Are there any other codes that imply the event was successfully
14255 * delivered to the guest? See @bugref{6607}. */
14256 if ( rcStrict == VINF_SUCCESS
14257 || rcStrict == VINF_IEM_RAISED_XCPT)
14258 {
14259 TRPMResetTrap(pVCpu);
14260 }
14261 return rcStrict;
14262#endif
14263}
14264
14265
14266VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14267{
14268 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14269 return VERR_NOT_IMPLEMENTED;
14270}
14271
14272
14273VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14274{
14275 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14276 return VERR_NOT_IMPLEMENTED;
14277}
14278
14279
14280#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14281/**
14282 * Executes an IRET instruction with the default operand size.
14283 *
14284 * This is for PATM.
14285 *
14286 * @returns VBox status code.
14287 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14288 * @param pCtxCore The register frame.
14289 */
14290VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14291{
14292 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14293
14294 iemCtxCoreToCtx(pCtx, pCtxCore);
14295 iemInitDecoder(pVCpu);
14296 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14297 if (rcStrict == VINF_SUCCESS)
14298 iemCtxToCtxCore(pCtxCore, pCtx);
14299 else
14300 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14301 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14302 return rcStrict;
14303}
14304#endif
14305
14306
14307/**
14308 * Macro used by the IEMExec* methods to check the given instruction length.
14309 *
14310 * Will return on failure!
14311 *
14312 * @param a_cbInstr The given instruction length.
14313 * @param a_cbMin The minimum length.
14314 */
14315#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14316 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14317 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14318
14319
14320/**
14321 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14322 *
14323 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14324 *
14325 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
14326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14327 * @param rcStrict The status code to fiddle.
14328 */
14329DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14330{
14331 iemUninitExec(pVCpu);
14332#ifdef IN_RC
14333 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14334 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14335#else
14336 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14337#endif
14338}
14339
14340
14341/**
14342 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14343 *
14344 * This API ASSUMES that the caller has already verified that the guest code is
14345 * allowed to access the I/O port. (The I/O port is in the DX register in the
14346 * guest state.)
14347 *
14348 * @returns Strict VBox status code.
14349 * @param pVCpu The cross context virtual CPU structure.
14350 * @param cbValue The size of the I/O port access (1, 2, or 4).
14351 * @param enmAddrMode The addressing mode.
14352 * @param fRepPrefix Indicates whether a repeat prefix is used
14353 * (doesn't matter which for this instruction).
14354 * @param cbInstr The instruction length in bytes.
14355 * @param iEffSeg The effective segment register number.
14356 * @param fIoChecked Whether the access to the I/O port has been
14357 * checked or not. It's typically checked in the
14358 * HM scenario.
14359 */
14360VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14361 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14362{
14363 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14364 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14365
14366 /*
14367 * State init.
14368 */
14369 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14370
14371 /*
14372 * Switch orgy for getting to the right handler.
14373 */
14374 VBOXSTRICTRC rcStrict;
14375 if (fRepPrefix)
14376 {
14377 switch (enmAddrMode)
14378 {
14379 case IEMMODE_16BIT:
14380 switch (cbValue)
14381 {
14382 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14383 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14384 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14385 default:
14386 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14387 }
14388 break;
14389
14390 case IEMMODE_32BIT:
14391 switch (cbValue)
14392 {
14393 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14394 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14395 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14396 default:
14397 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14398 }
14399 break;
14400
14401 case IEMMODE_64BIT:
14402 switch (cbValue)
14403 {
14404 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14405 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14406 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14407 default:
14408 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14409 }
14410 break;
14411
14412 default:
14413 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14414 }
14415 }
14416 else
14417 {
14418 switch (enmAddrMode)
14419 {
14420 case IEMMODE_16BIT:
14421 switch (cbValue)
14422 {
14423 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14424 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14425 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14426 default:
14427 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14428 }
14429 break;
14430
14431 case IEMMODE_32BIT:
14432 switch (cbValue)
14433 {
14434 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14435 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14436 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14437 default:
14438 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14439 }
14440 break;
14441
14442 case IEMMODE_64BIT:
14443 switch (cbValue)
14444 {
14445 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14446 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14447 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14448 default:
14449 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14450 }
14451 break;
14452
14453 default:
14454 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14455 }
14456 }
14457
14458 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14459}
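#if 0 /* Illustrative sketch (not built): how an HM exit handler might forward a
       * REP OUTSB it has already decoded. All values below (cbInstr, the DS
       * segment, the checked-I/O flag) are assumed to come from the exit
       * information; the function name is hypothetical. */
static VBOXSTRICTRC myHandleOutsExit(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif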
14460
14461
14462/**
14463 * Interface for HM and EM for executing string I/O IN (read) instructions.
14464 *
14465 * This API ASSUMES that the caller has already verified that the guest code is
14466 * allowed to access the I/O port. (The I/O port is in the DX register in the
14467 * guest state.)
14468 *
14469 * @returns Strict VBox status code.
14470 * @param pVCpu The cross context virtual CPU structure.
14471 * @param cbValue The size of the I/O port access (1, 2, or 4).
14472 * @param enmAddrMode The addressing mode.
14473 * @param fRepPrefix Indicates whether a repeat prefix is used
14474 * (doesn't matter which for this instruction).
14475 * @param cbInstr The instruction length in bytes.
14476 * @param fIoChecked Whether the access to the I/O port has been
14477 * checked or not. It's typically checked in the
14478 * HM scenario.
14479 */
14480VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14481 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14482{
14483 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14484
14485 /*
14486 * State init.
14487 */
14488 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14489
14490 /*
14491 * Switch orgy for getting to the right handler.
14492 */
14493 VBOXSTRICTRC rcStrict;
14494 if (fRepPrefix)
14495 {
14496 switch (enmAddrMode)
14497 {
14498 case IEMMODE_16BIT:
14499 switch (cbValue)
14500 {
14501 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14502 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14503 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14504 default:
14505 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14506 }
14507 break;
14508
14509 case IEMMODE_32BIT:
14510 switch (cbValue)
14511 {
14512 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14513 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14514 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14515 default:
14516 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14517 }
14518 break;
14519
14520 case IEMMODE_64BIT:
14521 switch (cbValue)
14522 {
14523 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14524 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14525 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14526 default:
14527 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14528 }
14529 break;
14530
14531 default:
14532 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14533 }
14534 }
14535 else
14536 {
14537 switch (enmAddrMode)
14538 {
14539 case IEMMODE_16BIT:
14540 switch (cbValue)
14541 {
14542 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14543 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14544 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14545 default:
14546 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14547 }
14548 break;
14549
14550 case IEMMODE_32BIT:
14551 switch (cbValue)
14552 {
14553 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14554 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14555 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14556 default:
14557 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14558 }
14559 break;
14560
14561 case IEMMODE_64BIT:
14562 switch (cbValue)
14563 {
14564 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14565 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14566 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14567 default:
14568 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14569 }
14570 break;
14571
14572 default:
14573 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14574 }
14575 }
14576
14577 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14578}
14579
14580
14581/**
14582 * Interface for raw-mode to execute an OUT (write) instruction.
14583 *
14584 * @returns Strict VBox status code.
14585 * @param pVCpu The cross context virtual CPU structure.
14586 * @param cbInstr The instruction length in bytes.
14587 * @param u16Port The port to write to.
14588 * @param cbReg The register size.
14589 *
14590 * @remarks In ring-0 not all of the state needs to be synced in.
14591 */
14592VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14593{
14594 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14595 Assert(cbReg <= 4 && cbReg != 3);
14596
14597 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14598 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14599 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14600}
14601
14602
14603/**
14604 * Interface for raw-mode to execute an IN (read) instruction.
14605 *
14606 * @returns Strict VBox status code.
14607 * @param pVCpu The cross context virtual CPU structure.
14608 * @param cbInstr The instruction length in bytes.
14609 * @param u16Port The port to read.
14610 * @param cbReg The register size.
14611 */
14612VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14613{
14614 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14615 Assert(cbReg <= 4 && cbReg != 3);
14616
14617 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14618 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14619 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14620}
14621
14622
14623/**
14624 * Interface for HM and EM to write to a CRx register.
14625 *
14626 * @returns Strict VBox status code.
14627 * @param pVCpu The cross context virtual CPU structure.
14628 * @param cbInstr The instruction length in bytes.
14629 * @param iCrReg The control register number (destination).
14630 * @param iGReg The general purpose register number (source).
14631 *
14632 * @remarks In ring-0 not all of the state needs to be synced in.
14633 */
14634VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14635{
14636 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14637 Assert(iCrReg < 16);
14638 Assert(iGReg < 16);
14639
14640 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14641 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14642 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14643}
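#if 0 /* Illustrative sketch (not built): a hypothetical exit handler forwarding
       * a decoded MOV CR3, reg write to IEM. The register index iGReg is
       * assumed to come from the exit qualification. */
static VBOXSTRICTRC myHandleMovToCr3Exit(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg: CR3*/, iGReg);
}
#endif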
14644
14645
14646/**
14647 * Interface for HM and EM to read from a CRx register.
14648 *
14649 * @returns Strict VBox status code.
14650 * @param pVCpu The cross context virtual CPU structure.
14651 * @param cbInstr The instruction length in bytes.
14652 * @param iGReg The general purpose register number (destination).
14653 * @param iCrReg The control register number (source).
14654 *
14655 * @remarks In ring-0 not all of the state needs to be synced in.
14656 */
14657VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14658{
14659 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14660 Assert(iCrReg < 16);
14661 Assert(iGReg < 16);
14662
14663 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14664 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14665 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14666}
14667
14668
14669/**
14670 * Interface for HM and EM to clear the CR0[TS] bit.
14671 *
14672 * @returns Strict VBox status code.
14673 * @param pVCpu The cross context virtual CPU structure.
14674 * @param cbInstr The instruction length in bytes.
14675 *
14676 * @remarks In ring-0 not all of the state needs to be synced in.
14677 */
14678VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14679{
14680 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14681
14682 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14683 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14684 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14685}
14686
14687
14688/**
14689 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14690 *
14691 * @returns Strict VBox status code.
14692 * @param pVCpu The cross context virtual CPU structure.
14693 * @param cbInstr The instruction length in bytes.
14694 * @param uValue The value to load into CR0.
14695 *
14696 * @remarks In ring-0 not all of the state needs to be synced in.
14697 */
14698VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14699{
14700 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14701
14702 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14703 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14704 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14705}
14706
14707
14708/**
14709 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14710 *
14711 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14712 *
14713 * @returns Strict VBox status code.
14714 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14715 * @param cbInstr The instruction length in bytes.
14716 * @remarks In ring-0 not all of the state needs to be synced in.
14717 * @thread EMT(pVCpu)
14718 */
14719VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14720{
14721 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14722
14723 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14724 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14725 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14726}
14727
14728#ifdef IN_RING3
14729
14730/**
14731 * Handles the unlikely and probably fatal merge cases.
14732 *
14733 * @returns Merged status code.
14734 * @param rcStrict Current EM status code.
14735 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14736 * with @a rcStrict.
14737 * @param iMemMap The memory mapping index. For error reporting only.
14738 * @param pVCpu The cross context virtual CPU structure of the calling
14739 * thread, for error reporting only.
14740 */
14741DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14742 unsigned iMemMap, PVMCPU pVCpu)
14743{
14744 if (RT_FAILURE_NP(rcStrict))
14745 return rcStrict;
14746
14747 if (RT_FAILURE_NP(rcStrictCommit))
14748 return rcStrictCommit;
14749
14750 if (rcStrict == rcStrictCommit)
14751 return rcStrictCommit;
14752
14753 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14754 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14755 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14756 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14758 return VERR_IOM_FF_STATUS_IPE;
14759}
14760
14761
14762/**
14763 * Helper for IOMR3ProcessForceFlag.
14764 *
14765 * @returns Merged status code.
14766 * @param rcStrict Current EM status code.
14767 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14768 * with @a rcStrict.
14769 * @param iMemMap The memory mapping index. For error reporting only.
14770 * @param pVCpu The cross context virtual CPU structure of the calling
14771 * thread, for error reporting only.
14772 */
14773DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14774{
14775 /* Simple. */
14776 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14777 return rcStrictCommit;
14778
14779 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14780 return rcStrict;
14781
14782 /* EM scheduling status codes. */
14783 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14784 && rcStrict <= VINF_EM_LAST))
14785 {
14786 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14787 && rcStrictCommit <= VINF_EM_LAST))
14788 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14789 }
14790
14791 /* Unlikely */
14792 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14793}
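#if 0 /* Illustrative sketch (not built): expected outcomes of the merge rules
       * above. The Assert lines only document behaviour; the function name is
       * hypothetical. */
static void myMergeStatusExamples(PVMCPU pVCpu)
{
    /* The EM side has nothing interesting: the commit status wins. */
    Assert(iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RESCHEDULE, 0, pVCpu) == VINF_EM_RESCHEDULE);
    /* The commit succeeded: whatever EM status we had is preserved. */
    Assert(iemR3MergeStatus(VINF_EM_RESCHEDULE, VINF_SUCCESS, 0, pVCpu) == VINF_EM_RESCHEDULE);
    /* Both sides EM scheduling codes: the numerically smaller (more urgent) one
       is returned; anything else ends up in iemR3MergeStatusSlow. */
}
#endif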
14794
14795
14796/**
14797 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14798 *
14799 * @returns Merge between @a rcStrict and what the commit operation returned.
14800 * @param pVM The cross context VM structure.
14801 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14802 * @param rcStrict The status code returned by ring-0 or raw-mode.
14803 */
14804VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14805{
14806 /*
14807 * Reset the pending commit.
14808 */
14809 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14810 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14811 ("%#x %#x %#x\n",
14812 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14813 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14814
14815 /*
14816 * Commit the pending bounce buffers (usually just one).
14817 */
14818 unsigned cBufs = 0;
14819 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14820 while (iMemMap-- > 0)
14821 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14822 {
14823 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14824 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14825 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14826
14827 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14828 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14829 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14830
14831 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14832 {
14833 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14835 pbBuf,
14836 cbFirst,
14837 PGMACCESSORIGIN_IEM);
14838 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14839 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14840 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14841 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14842 }
14843
14844 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14845 {
14846 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14848 pbBuf + cbFirst,
14849 cbSecond,
14850 PGMACCESSORIGIN_IEM);
14851 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14852 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14853 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14854 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14855 }
14856 cBufs++;
14857 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14858 }
14859
14860 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14861 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14862 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14863 pVCpu->iem.s.cActiveMappings = 0;
14864 return rcStrict;
14865}
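#if 0 /* Illustrative sketch (not built): how a ring-3 force-flag loop might
       * hand a pending IEM write commit back to us; the surrounding loop and
       * the local rcStrict are hypothetical. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif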
14866
14867#endif /* IN_RING3 */
14868