VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@65191

Last change on this file since 65191 was 65191, checked in by vboxsync, 8 years ago

IEM: Typo caused FUCOMPP trouble.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 574.2 KB
1/* $Id: IEMAll.cpp 65191 2017-01-07 22:39:30Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that cause too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
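/* Illustrative addition (not part of the original file): the level conventions
 * above map onto the standard VBox double-parenthesis logging macros used
 * throughout this file, for example:
 *     Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));   // level 1 event
 *     Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));    // level 4 decode info
 * LOG_GROUP is defined as LOG_GROUP_IEM below, so these all end up in the "IEM"
 * log group. */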
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#ifdef IEM_VERIFICATION_MODE_FULL
115# include <VBox/vmm/rem.h>
116# include <VBox/vmm/mm.h>
117#endif
118#include <VBox/vmm/vm.h>
119#include <VBox/log.h>
120#include <VBox/err.h>
121#include <VBox/param.h>
122#include <VBox/dis.h>
123#include <VBox/disopcode.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
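/* Illustrative sketch (not part of the original file) of how the FNIEMOP_DEF and
 * FNIEMOP_CALL macros above fit together; the handler name iemOp_Example is
 * hypothetical, and the expansion shown is the generic fallback variant:
 *
 *     FNIEMOP_DEF(iemOp_Example)   // -> IEM_STATIC VBOXSTRICTRC iemOp_Example(PVMCPU pVCpu) RT_NO_THROW_DEF
 *     {
 *         return VINF_SUCCESS;     // decoding and dispatch would go here
 *     }
 *
 *     // Dispatching a one-byte opcode b through the map declared further down:
 *     return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */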
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215
216/*********************************************************************************************************************************
217* Defined Constants And Macros *
218*********************************************************************************************************************************/
219/** @def IEM_WITH_SETJMP
220 * Enables alternative status code handling using setjmps.
221 *
222 * This adds a bit of expense via the setjmp() call since it saves all the
223 * non-volatile registers. However, it eliminates return code checks and allows
224 * for more optimal return value passing (return regs instead of stack buffer).
225 */
226#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
227# define IEM_WITH_SETJMP
228#endif
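/* Illustrative sketch (not part of the original file) of the two error-handling
 * styles the IEM_WITH_SETJMP switch above selects between. iemMemFetchDataU32 is
 * declared later in this file; the *Jmp counterpart merely follows the naming
 * pattern used by the setjmp-based paths and is assumed here:
 *
 *     // Status-code style: every call must be checked explicitly.
 *     uint32_t u32Val;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iSegReg, GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 *     // Setjmp style: the *Jmp variant longjmps on failure, so the value can be
 *     // returned directly and no per-call status check is needed.
 *     uint32_t u32Val2 = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */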
229
230/** Temporary hack to disable the double execution. Will be removed in favor
231 * of a dedicated execution mode in EM. */
232//#define IEM_VERIFICATION_MODE_NO_REM
233
234/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
235 * due to GCC lacking knowledge about the value range of a switch. */
236#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
237
238/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
240
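/* Illustrative usage sketch (not part of the original file): the default-case
 * macros above are meant for switches over enums whose full value range the
 * compiler cannot prove, e.g.:
 *
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbValue = 2; break;
 *         case IEMMODE_32BIT: cbValue = 4; break;
 *         case IEMMODE_64BIT: cbValue = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */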
241/**
242 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
243 * occasion.
244 */
245#ifdef LOG_ENABLED
246# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
247 do { \
248 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
249 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
250 } while (0)
251#else
252# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
254#endif
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion using the supplied logger statement.
259 *
260 * @param a_LoggerArgs What to log on failure.
261 */
262#ifdef LOG_ENABLED
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 do { \
265 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
266 /*LogFunc(a_LoggerArgs);*/ \
267 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
268 } while (0)
269#else
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
272#endif
273
274/**
275 * Call an opcode decoder function.
276 *
277 * We're using macros for this so that adding and removing parameters can be
278 * done as we please. See FNIEMOP_DEF.
279 */
280#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
281
282/**
283 * Call a common opcode decoder function taking one extra argument.
284 *
285 * We're using macros for this so that adding and removing parameters can be
286 * done as we please. See FNIEMOP_DEF_1.
287 */
288#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
289
290/**
291 * Call a common opcode decoder function taking two extra arguments.
292 *
293 * We're using macros for this so that adding and removing parameters can be
294 * done as we please. See FNIEMOP_DEF_2.
295 */
296#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
297
298/**
299 * Check if we're currently executing in real or virtual 8086 mode.
300 *
301 * @returns @c true if it is, @c false if not.
302 * @param a_pVCpu The IEM state of the current CPU.
303 */
304#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
305
306/**
307 * Check if we're currently executing in virtual 8086 mode.
308 *
309 * @returns @c true if it is, @c false if not.
310 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
311 */
312#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
313
314/**
315 * Check if we're currently executing in long mode.
316 *
317 * @returns @c true if it is, @c false if not.
318 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
319 */
320#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
321
322/**
323 * Check if we're currently executing in real mode.
324 *
325 * @returns @c true if it is, @c false if not.
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
336
337/**
338 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
339 * @returns PCCPUMFEATURES
340 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
341 */
342#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
343
344/**
345 * Evaluates to true if we're presenting an Intel CPU to the guest.
346 */
347#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
348
349/**
350 * Evaluates to true if we're presenting an AMD CPU to the guest.
351 */
352#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
353
354/**
355 * Check if the address is canonical.
356 */
357#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
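/* Reminder (not part of the original file): on AMD64 an address is canonical when
 * bits 63..48 are copies of bit 47, i.e. sign-extending from bit 47 reproduces the
 * value. A minimal stand-alone check would look like this:
 *
 *     bool const fCanonical = (uint64_t)((int64_t)(u64Addr << 16) >> 16) == u64Addr;
 */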
358
359/** @def IEM_USE_UNALIGNED_DATA_ACCESS
360 * Use unaligned accesses instead of elaborate byte assembly. */
361#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
362# define IEM_USE_UNALIGNED_DATA_ACCESS
363#endif
364
365
366/*********************************************************************************************************************************
367* Global Variables *
368*********************************************************************************************************************************/
369extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
370
371
372/** Function table for the ADD instruction. */
373IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
374{
375 iemAImpl_add_u8, iemAImpl_add_u8_locked,
376 iemAImpl_add_u16, iemAImpl_add_u16_locked,
377 iemAImpl_add_u32, iemAImpl_add_u32_locked,
378 iemAImpl_add_u64, iemAImpl_add_u64_locked
379};
380
381/** Function table for the ADC instruction. */
382IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
383{
384 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
385 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
386 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
387 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
388};
389
390/** Function table for the SUB instruction. */
391IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
392{
393 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
394 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
395 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
396 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
397};
398
399/** Function table for the SBB instruction. */
400IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
401{
402 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
403 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
404 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
405 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
406};
407
408/** Function table for the OR instruction. */
409IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
410{
411 iemAImpl_or_u8, iemAImpl_or_u8_locked,
412 iemAImpl_or_u16, iemAImpl_or_u16_locked,
413 iemAImpl_or_u32, iemAImpl_or_u32_locked,
414 iemAImpl_or_u64, iemAImpl_or_u64_locked
415};
416
417/** Function table for the XOR instruction. */
418IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
419{
420 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
421 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
422 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
423 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
424};
425
426/** Function table for the AND instruction. */
427IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
428{
429 iemAImpl_and_u8, iemAImpl_and_u8_locked,
430 iemAImpl_and_u16, iemAImpl_and_u16_locked,
431 iemAImpl_and_u32, iemAImpl_and_u32_locked,
432 iemAImpl_and_u64, iemAImpl_and_u64_locked
433};
434
435/** Function table for the CMP instruction.
436 * @remarks Making operand order ASSUMPTIONS.
437 */
438IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
439{
440 iemAImpl_cmp_u8, NULL,
441 iemAImpl_cmp_u16, NULL,
442 iemAImpl_cmp_u32, NULL,
443 iemAImpl_cmp_u64, NULL
444};
445
446/** Function table for the TEST instruction.
447 * @remarks Making operand order ASSUMPTIONS.
448 */
449IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
450{
451 iemAImpl_test_u8, NULL,
452 iemAImpl_test_u16, NULL,
453 iemAImpl_test_u32, NULL,
454 iemAImpl_test_u64, NULL
455};
456
457/** Function table for the BT instruction. */
458IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
459{
460 NULL, NULL,
461 iemAImpl_bt_u16, NULL,
462 iemAImpl_bt_u32, NULL,
463 iemAImpl_bt_u64, NULL
464};
465
466/** Function table for the BTC instruction. */
467IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
468{
469 NULL, NULL,
470 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
471 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
472 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
473};
474
475/** Function table for the BTR instruction. */
476IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
477{
478 NULL, NULL,
479 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
480 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
481 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
482};
483
484/** Function table for the BTS instruction. */
485IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
486{
487 NULL, NULL,
488 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
489 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
490 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
491};
492
493/** Function table for the BSF instruction. */
494IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
495{
496 NULL, NULL,
497 iemAImpl_bsf_u16, NULL,
498 iemAImpl_bsf_u32, NULL,
499 iemAImpl_bsf_u64, NULL
500};
501
502/** Function table for the BSR instruction. */
503IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
504{
505 NULL, NULL,
506 iemAImpl_bsr_u16, NULL,
507 iemAImpl_bsr_u32, NULL,
508 iemAImpl_bsr_u64, NULL
509};
510
511/** Function table for the IMUL instruction. */
512IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
513{
514 NULL, NULL,
515 iemAImpl_imul_two_u16, NULL,
516 iemAImpl_imul_two_u32, NULL,
517 iemAImpl_imul_two_u64, NULL
518};
519
520/** Group 1 /r lookup table. */
521IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
522{
523 &g_iemAImpl_add,
524 &g_iemAImpl_or,
525 &g_iemAImpl_adc,
526 &g_iemAImpl_sbb,
527 &g_iemAImpl_and,
528 &g_iemAImpl_sub,
529 &g_iemAImpl_xor,
530 &g_iemAImpl_cmp
531};
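/* Illustrative sketch (not part of the original file): group 1 opcodes (0x80..0x83)
 * encode the actual operation in the reg field of the ModR/M byte, so the decoder
 * can pick the implementation via a plain table index (the real decoder uses the
 * X86_MODRM_* helpers rather than the raw shift/mask shown here):
 *
 *     PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 *     // pImpl then supplies the size-specific normal/locked workers initialized above.
 */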
532
533/** Function table for the INC instruction. */
534IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
535{
536 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
537 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
538 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
539 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
540};
541
542/** Function table for the DEC instruction. */
543IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
544{
545 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
546 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
547 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
548 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
549};
550
551/** Function table for the NEG instruction. */
552IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
553{
554 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
555 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
556 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
557 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
558};
559
560/** Function table for the NOT instruction. */
561IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
562{
563 iemAImpl_not_u8, iemAImpl_not_u8_locked,
564 iemAImpl_not_u16, iemAImpl_not_u16_locked,
565 iemAImpl_not_u32, iemAImpl_not_u32_locked,
566 iemAImpl_not_u64, iemAImpl_not_u64_locked
567};
568
569
570/** Function table for the ROL instruction. */
571IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
572{
573 iemAImpl_rol_u8,
574 iemAImpl_rol_u16,
575 iemAImpl_rol_u32,
576 iemAImpl_rol_u64
577};
578
579/** Function table for the ROR instruction. */
580IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
581{
582 iemAImpl_ror_u8,
583 iemAImpl_ror_u16,
584 iemAImpl_ror_u32,
585 iemAImpl_ror_u64
586};
587
588/** Function table for the RCL instruction. */
589IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
590{
591 iemAImpl_rcl_u8,
592 iemAImpl_rcl_u16,
593 iemAImpl_rcl_u32,
594 iemAImpl_rcl_u64
595};
596
597/** Function table for the RCR instruction. */
598IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
599{
600 iemAImpl_rcr_u8,
601 iemAImpl_rcr_u16,
602 iemAImpl_rcr_u32,
603 iemAImpl_rcr_u64
604};
605
606/** Function table for the SHL instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
608{
609 iemAImpl_shl_u8,
610 iemAImpl_shl_u16,
611 iemAImpl_shl_u32,
612 iemAImpl_shl_u64
613};
614
615/** Function table for the SHR instruction. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
617{
618 iemAImpl_shr_u8,
619 iemAImpl_shr_u16,
620 iemAImpl_shr_u32,
621 iemAImpl_shr_u64
622};
623
624/** Function table for the SAR instruction. */
625IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
626{
627 iemAImpl_sar_u8,
628 iemAImpl_sar_u16,
629 iemAImpl_sar_u32,
630 iemAImpl_sar_u64
631};
632
633
634/** Function table for the MUL instruction. */
635IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
636{
637 iemAImpl_mul_u8,
638 iemAImpl_mul_u16,
639 iemAImpl_mul_u32,
640 iemAImpl_mul_u64
641};
642
643/** Function table for the IMUL instruction working implicitly on rAX. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
645{
646 iemAImpl_imul_u8,
647 iemAImpl_imul_u16,
648 iemAImpl_imul_u32,
649 iemAImpl_imul_u64
650};
651
652/** Function table for the DIV instruction. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
654{
655 iemAImpl_div_u8,
656 iemAImpl_div_u16,
657 iemAImpl_div_u32,
658 iemAImpl_div_u64
659};
660
661/** Function table for the IDIV instruction. */
662IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
663{
664 iemAImpl_idiv_u8,
665 iemAImpl_idiv_u16,
666 iemAImpl_idiv_u32,
667 iemAImpl_idiv_u64
668};
669
670/** Function table for the SHLD instruction */
671IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
672{
673 iemAImpl_shld_u16,
674 iemAImpl_shld_u32,
675 iemAImpl_shld_u64,
676};
677
678/** Function table for the SHRD instruction */
679IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
680{
681 iemAImpl_shrd_u16,
682 iemAImpl_shrd_u32,
683 iemAImpl_shrd_u64,
684};
685
686
687/** Function table for the PUNPCKLBW instruction */
688IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
689/** Function table for the PUNPCKLWD instruction */
690IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
691/** Function table for the PUNPCKLDQ instruction */
692IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
693/** Function table for the PUNPCKLQDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
695
696/** Function table for the PUNPCKHBW instruction */
697IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
698/** Function table for the PUNPCKHWD instruction */
699IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
700/** Function table for the PUNPCKHDQ instruction */
701IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
702/** Function table for the PUNPCKHQDQ instruction */
703IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
704
705/** Function table for the PXOR instruction */
706IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
707/** Function table for the PCMPEQB instruction */
708IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
709/** Function table for the PCMPEQW instruction */
710IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
711/** Function table for the PCMPEQD instruction */
712IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
713
714
715#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
716/** What IEM just wrote. */
717uint8_t g_abIemWrote[256];
718/** How much IEM just wrote. */
719size_t g_cbIemWrote;
720#endif
721
722
723/*********************************************************************************************************************************
724* Internal Functions *
725*********************************************************************************************************************************/
726IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
727IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
728IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
729IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
730/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
733IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
734IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
735IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
736IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
737IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
738IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
740IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
742IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
743#ifdef IEM_WITH_SETJMP
744DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
745DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
746DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
748DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
749#endif
750
751IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
752IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
754IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
755IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
756IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
757IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
758IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
759IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
760IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
761IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
762IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
763IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
764IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
765IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
766IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
767
768#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
769IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
770#endif
771IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
772IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
773
774
775
776/**
777 * Sets the pass up status.
778 *
779 * @returns VINF_SUCCESS.
780 * @param pVCpu The cross context virtual CPU structure of the
781 * calling thread.
782 * @param rcPassUp The pass up status. Must be informational.
783 * VINF_SUCCESS is not allowed.
784 */
785IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
786{
787 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
788
789 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
790 if (rcOldPassUp == VINF_SUCCESS)
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 /* If both are EM scheduling codes, use EM priority rules. */
793 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
794 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
795 {
796 if (rcPassUp < rcOldPassUp)
797 {
798 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
799 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
800 }
801 else
802 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
803 }
804 /* Override EM scheduling with specific status code. */
805 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
806 {
807 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
808 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
809 }
810 /* Don't override specific status code, first come first served. */
811 else
812 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Calculates the CPU mode.
819 *
820 * This is mainly for updating IEMCPU::enmCpuMode.
821 *
822 * @returns CPU mode.
823 * @param pCtx The register context for the CPU.
824 */
825DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
826{
827 if (CPUMIsGuestIn64BitCodeEx(pCtx))
828 return IEMMODE_64BIT;
829 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
830 return IEMMODE_32BIT;
831 return IEMMODE_16BIT;
832}
833
834
835/**
836 * Initializes the execution state.
837 *
838 * @param pVCpu The cross context virtual CPU structure of the
839 * calling thread.
840 * @param fBypassHandlers Whether to bypass access handlers.
841 *
842 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
843 * side-effects in strict builds.
844 */
845DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
846{
847 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
848
849 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
850
851#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
854 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
855 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
856 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
858 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
859 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
860#endif
861
862#ifdef VBOX_WITH_RAW_MODE_NOT_R0
863 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
864#endif
865 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
866 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
867#ifdef VBOX_STRICT
868 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
869 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
870 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
871 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
872 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
873 pVCpu->iem.s.uRexReg = 127;
874 pVCpu->iem.s.uRexB = 127;
875 pVCpu->iem.s.uRexIndex = 127;
876 pVCpu->iem.s.iEffSeg = 127;
877 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
878# ifdef IEM_WITH_CODE_TLB
879 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
880 pVCpu->iem.s.pbInstrBuf = NULL;
881 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
882 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
883 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
884 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
885# else
886 pVCpu->iem.s.offOpcode = 127;
887 pVCpu->iem.s.cbOpcode = 127;
888# endif
889#endif
890
891 pVCpu->iem.s.cActiveMappings = 0;
892 pVCpu->iem.s.iNextMapping = 0;
893 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
894 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
895#ifdef VBOX_WITH_RAW_MODE_NOT_R0
896 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
897 && pCtx->cs.u64Base == 0
898 && pCtx->cs.u32Limit == UINT32_MAX
899 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
900 if (!pVCpu->iem.s.fInPatchCode)
901 CPUMRawLeave(pVCpu, VINF_SUCCESS);
902#endif
903
904#ifdef IEM_VERIFICATION_MODE_FULL
905 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
906 pVCpu->iem.s.fNoRem = true;
907#endif
908}
909
910
911/**
912 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
913 *
914 * @param pVCpu The cross context virtual CPU structure of the
915 * calling thread.
916 */
917DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
918{
919 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
920#ifdef IEM_VERIFICATION_MODE_FULL
921 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
922#endif
923#ifdef VBOX_STRICT
924# ifdef IEM_WITH_CODE_TLB
925# else
926 pVCpu->iem.s.cbOpcode = 0;
927# endif
928#else
929 NOREF(pVCpu);
930#endif
931}
932
933
934/**
935 * Initializes the decoder state.
936 *
937 * iemReInitDecoder is mostly a copy of this function.
938 *
939 * @param pVCpu The cross context virtual CPU structure of the
940 * calling thread.
941 * @param fBypassHandlers Whether to bypass access handlers.
942 */
943DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
944{
945 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
946
947 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
948
949#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
950 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
951 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
952 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
953 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
954 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
955 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
956 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
957 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
958#endif
959
960#ifdef VBOX_WITH_RAW_MODE_NOT_R0
961 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
962#endif
963 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
964#ifdef IEM_VERIFICATION_MODE_FULL
965 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
966 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
967#endif
968 IEMMODE enmMode = iemCalcCpuMode(pCtx);
969 pVCpu->iem.s.enmCpuMode = enmMode;
970 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
971 pVCpu->iem.s.enmEffAddrMode = enmMode;
972 if (enmMode != IEMMODE_64BIT)
973 {
974 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
975 pVCpu->iem.s.enmEffOpSize = enmMode;
976 }
977 else
978 {
979 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
980 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
981 }
982 pVCpu->iem.s.fPrefixes = 0;
983 pVCpu->iem.s.uRexReg = 0;
984 pVCpu->iem.s.uRexB = 0;
985 pVCpu->iem.s.uRexIndex = 0;
986 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
987#ifdef IEM_WITH_CODE_TLB
988 pVCpu->iem.s.pbInstrBuf = NULL;
989 pVCpu->iem.s.offInstrNextByte = 0;
990 pVCpu->iem.s.offCurInstrStart = 0;
991# ifdef VBOX_STRICT
992 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
993 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
994 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
995# endif
996#else
997 pVCpu->iem.s.offOpcode = 0;
998 pVCpu->iem.s.cbOpcode = 0;
999#endif
1000 pVCpu->iem.s.cActiveMappings = 0;
1001 pVCpu->iem.s.iNextMapping = 0;
1002 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1003 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1004#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1005 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1006 && pCtx->cs.u64Base == 0
1007 && pCtx->cs.u32Limit == UINT32_MAX
1008 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1009 if (!pVCpu->iem.s.fInPatchCode)
1010 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1011#endif
1012
1013#ifdef DBGFTRACE_ENABLED
1014 switch (enmMode)
1015 {
1016 case IEMMODE_64BIT:
1017 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1018 break;
1019 case IEMMODE_32BIT:
1020 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1021 break;
1022 case IEMMODE_16BIT:
1023 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1024 break;
1025 }
1026#endif
1027}
1028
1029
1030/**
1031 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1032 *
1033 * This is mostly a copy of iemInitDecoder.
1034 *
1035 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1036 */
1037DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1038{
1039 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1040
1041 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1042
1043#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1044 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1046 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1049 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1050 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1051 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1052#endif
1053
1054 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1055#ifdef IEM_VERIFICATION_MODE_FULL
1056 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1057 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1058#endif
1059 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1060 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1061 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1062 pVCpu->iem.s.enmEffAddrMode = enmMode;
1063 if (enmMode != IEMMODE_64BIT)
1064 {
1065 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1066 pVCpu->iem.s.enmEffOpSize = enmMode;
1067 }
1068 else
1069 {
1070 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1071 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1072 }
1073 pVCpu->iem.s.fPrefixes = 0;
1074 pVCpu->iem.s.uRexReg = 0;
1075 pVCpu->iem.s.uRexB = 0;
1076 pVCpu->iem.s.uRexIndex = 0;
1077 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1078#ifdef IEM_WITH_CODE_TLB
1079 if (pVCpu->iem.s.pbInstrBuf)
1080 {
1081 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1082 - pVCpu->iem.s.uInstrBufPc;
1083 if (off < pVCpu->iem.s.cbInstrBufTotal)
1084 {
1085 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1086 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1087 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1088 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1089 else
1090 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1091 }
1092 else
1093 {
1094 pVCpu->iem.s.pbInstrBuf = NULL;
1095 pVCpu->iem.s.offInstrNextByte = 0;
1096 pVCpu->iem.s.offCurInstrStart = 0;
1097 pVCpu->iem.s.cbInstrBuf = 0;
1098 pVCpu->iem.s.cbInstrBufTotal = 0;
1099 }
1100 }
1101 else
1102 {
1103 pVCpu->iem.s.offInstrNextByte = 0;
1104 pVCpu->iem.s.offCurInstrStart = 0;
1105 pVCpu->iem.s.cbInstrBuf = 0;
1106 pVCpu->iem.s.cbInstrBufTotal = 0;
1107 }
1108#else
1109 pVCpu->iem.s.cbOpcode = 0;
1110 pVCpu->iem.s.offOpcode = 0;
1111#endif
1112 Assert(pVCpu->iem.s.cActiveMappings == 0);
1113 pVCpu->iem.s.iNextMapping = 0;
1114 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1115 Assert(pVCpu->iem.s.fBypassHandlers == false);
1116#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1117 if (!pVCpu->iem.s.fInPatchCode)
1118 { /* likely */ }
1119 else
1120 {
1121 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1122 && pCtx->cs.u64Base == 0
1123 && pCtx->cs.u32Limit == UINT32_MAX
1124 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1125 if (!pVCpu->iem.s.fInPatchCode)
1126 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1127 }
1128#endif
1129
1130#ifdef DBGFTRACE_ENABLED
1131 switch (enmMode)
1132 {
1133 case IEMMODE_64BIT:
1134 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1135 break;
1136 case IEMMODE_32BIT:
1137 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1138 break;
1139 case IEMMODE_16BIT:
1140 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1141 break;
1142 }
1143#endif
1144}
1145
1146
1147
1148/**
1149 * Prefetches opcodes the first time, i.e. when starting execution.
1150 *
1151 * @returns Strict VBox status code.
1152 * @param pVCpu The cross context virtual CPU structure of the
1153 * calling thread.
1154 * @param fBypassHandlers Whether to bypass access handlers.
1155 */
1156IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1157{
1158#ifdef IEM_VERIFICATION_MODE_FULL
1159 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1160#endif
1161 iemInitDecoder(pVCpu, fBypassHandlers);
1162
1163#ifdef IEM_WITH_CODE_TLB
1164 /** @todo Do ITLB lookup here. */
1165
1166#else /* !IEM_WITH_CODE_TLB */
1167
1168 /*
1169 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1170 *
1171 * First translate CS:rIP to a physical address.
1172 */
1173 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1174 uint32_t cbToTryRead;
1175 RTGCPTR GCPtrPC;
1176 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1177 {
1178 cbToTryRead = PAGE_SIZE;
1179 GCPtrPC = pCtx->rip;
1180 if (!IEM_IS_CANONICAL(GCPtrPC))
1181 return iemRaiseGeneralProtectionFault0(pVCpu);
1182 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1183 }
1184 else
1185 {
1186 uint32_t GCPtrPC32 = pCtx->eip;
1187 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1188 if (GCPtrPC32 > pCtx->cs.u32Limit)
1189 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1190 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1191 if (!cbToTryRead) /* overflowed */
1192 {
1193 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1194 cbToTryRead = UINT32_MAX;
1195 }
1196 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1197 Assert(GCPtrPC <= UINT32_MAX);
1198 }
1199
1200# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1201 /* Allow interpretation of patch manager code blocks since they can for
1202 instance throw #PFs for perfectly good reasons. */
1203 if (pVCpu->iem.s.fInPatchCode)
1204 {
1205 size_t cbRead = 0;
1206 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1207 AssertRCReturn(rc, rc);
1208 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1209 return VINF_SUCCESS;
1210 }
1211# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1212
1213 RTGCPHYS GCPhys;
1214 uint64_t fFlags;
1215 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1216 if (RT_FAILURE(rc))
1217 {
1218 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1219 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1220 }
1221 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1222 {
1223 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1224 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1225 }
1226 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1227 {
1228 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1229 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1230 }
1231 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1232 /** @todo Check reserved bits and such stuff. PGM is better at doing
1233 * that, so do it when implementing the guest virtual address
1234 * TLB... */
1235
1236# ifdef IEM_VERIFICATION_MODE_FULL
1237 /*
1238 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1239 * instruction.
1240 */
1241 /** @todo optimize this differently by not using PGMPhysRead. */
1242 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1243 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1244 if ( offPrevOpcodes < cbOldOpcodes
1245 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1246 {
1247 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1248 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1249 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1250 pVCpu->iem.s.cbOpcode = cbNew;
1251 return VINF_SUCCESS;
1252 }
1253# endif
1254
1255 /*
1256 * Read the bytes at this address.
1257 */
1258 PVM pVM = pVCpu->CTX_SUFF(pVM);
1259# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1260 size_t cbActual;
1261 if ( PATMIsEnabled(pVM)
1262 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1263 {
1264 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1265 Assert(cbActual > 0);
1266 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1267 }
1268 else
1269# endif
1270 {
1271 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1272 if (cbToTryRead > cbLeftOnPage)
1273 cbToTryRead = cbLeftOnPage;
1274 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1275 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1276
1277 if (!pVCpu->iem.s.fBypassHandlers)
1278 {
1279 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1280 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1281 { /* likely */ }
1282 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1283 {
1284 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1285 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1286 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1287 }
1288 else
1289 {
1290 Log((RT_SUCCESS(rcStrict)
1291 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1292 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1293 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1294 return rcStrict;
1295 }
1296 }
1297 else
1298 {
1299 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1300 if (RT_SUCCESS(rc))
1301 { /* likely */ }
1302 else
1303 {
1304 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1305 GCPtrPC, GCPhys, cbToTryRead, rc));
1306 return rc;
1307 }
1308 }
1309 pVCpu->iem.s.cbOpcode = cbToTryRead;
1310 }
1311#endif /* !IEM_WITH_CODE_TLB */
1312 return VINF_SUCCESS;
1313}
1314
1315
1316/**
1317 * Invalidates the IEM TLBs.
1318 *
1319 * This is called internally as well as by PGM when moving GC mappings.
1320 *
1321 *
1322 * @param pVCpu The cross context virtual CPU structure of the calling
1323 * thread.
1324 * @param fVmm Set when PGM calls us with a remapping.
1325 */
1326VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1327{
1328#ifdef IEM_WITH_CODE_TLB
1329 pVCpu->iem.s.cbInstrBufTotal = 0;
1330 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1331 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1332 { /* very likely */ }
1333 else
1334 {
1335 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1336 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1337 while (i-- > 0)
1338 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1339 }
1340#endif
1341
1342#ifdef IEM_WITH_DATA_TLB
1343 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1344 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1345 { /* very likely */ }
1346 else
1347 {
1348 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1349 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1350 while (i-- > 0)
1351 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1352 }
1353#endif
1354 NOREF(pVCpu); NOREF(fVmm);
1355}
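/* Illustrative sketch (not part of the original file) of the revision trick used
 * above: a TLB tag is the page number OR'ed with the current revision, so bumping
 * the revision makes every existing entry miss without touching the array. The
 * lookup mirrors the code-TLB fetch path further down in this file:
 *
 *     uint64_t const uTag  = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *     PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
 *     bool const     fHit  = pTlbe->uTag == uTag;  // entries tagged with an old revision never match
 */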
1356
1357
1358/**
1359 * Invalidates a page in the TLBs.
1360 *
1361 * @param pVCpu The cross context virtual CPU structure of the calling
1362 * thread.
1363 * @param GCPtr The address of the page to invalidate
1364 */
1365VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1366{
1367#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1368 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1369 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1370 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1371 uintptr_t idx = (uint8_t)GCPtr;
1372
1373# ifdef IEM_WITH_CODE_TLB
1374 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1375 {
1376 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1377 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1378 pVCpu->iem.s.cbInstrBufTotal = 0;
1379 }
1380# endif
1381
1382# ifdef IEM_WITH_DATA_TLB
1383 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1384 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1385# endif
1386#else
1387 NOREF(pVCpu); NOREF(GCPtr);
1388#endif
1389}
1390
1391
1392/**
1393 * Invalidates the host physical aspects of the IEM TLBs.
1394 *
1395 * This is called internally as well as by PGM when moving GC mappings.
1396 *
1397 * @param pVCpu The cross context virtual CPU structure of the calling
1398 * thread.
1399 */
1400VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1401{
1402#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1403 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1404
1405# ifdef IEM_WITH_CODE_TLB
1406 pVCpu->iem.s.cbInstrBufTotal = 0;
1407# endif
1408 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1409 if (uTlbPhysRev != 0)
1410 {
1411 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1412 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1413 }
1414 else
1415 {
1416 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1417 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1418
1419 unsigned i;
1420# ifdef IEM_WITH_CODE_TLB
1421 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1422 while (i-- > 0)
1423 {
1424 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1425 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1426 }
1427# endif
1428# ifdef IEM_WITH_DATA_TLB
1429 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1430 while (i-- > 0)
1431 {
1432 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1433 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1434 }
1435# endif
1436 }
1437#else
1438 NOREF(pVCpu);
1439#endif
1440}
1441
1442
1443/**
1444 * Invalidates the host physical aspects of the IEM TLBs for all CPUs.
1445 *
1446 * This is called internally as well as by PGM when moving GC mappings.
1447 *
1448 * @param pVM The cross context VM structure.
1449 *
1450 * @remarks Caller holds the PGM lock.
1451 */
1452VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1453{
1454 RT_NOREF_PV(pVM);
1455}
1456
1457#ifdef IEM_WITH_CODE_TLB
1458
1459/**
1460 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1461 * failure and jumps.
1462 *
1463 * We end up here for a number of reasons:
1464 * - pbInstrBuf isn't yet initialized.
1465 * - Advancing beyond the buffer boundary (e.g. cross page).
1466 * - Advancing beyond the CS segment limit.
1467 * - Fetching from non-mappable page (e.g. MMIO).
1468 *
1469 * @param pVCpu The cross context virtual CPU structure of the
1470 * calling thread.
1471 * @param pvDst Where to return the bytes.
1472 * @param cbDst Number of bytes to read.
1473 *
1474 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1475 */
1476IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1477{
1478#ifdef IN_RING3
1479//__debugbreak();
1480#else
1481 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1482#endif
1483 for (;;)
1484 {
1485 Assert(cbDst <= 8);
1486 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1487
1488 /*
1489 * We might have a partial buffer match, deal with that first to make the
1490 * rest simpler. This is the first part of the cross page/buffer case.
1491 */
1492 if (pVCpu->iem.s.pbInstrBuf != NULL)
1493 {
1494 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1495 {
1496 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1497 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1498 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1499
1500 cbDst -= cbCopy;
1501 pvDst = (uint8_t *)pvDst + cbCopy;
1502 offBuf += cbCopy;
1503 pVCpu->iem.s.offInstrNextByte += cbCopy;
1504 }
1505 }
1506
1507 /*
1508 * Check segment limit, figuring how much we're allowed to access at this point.
1509 *
1510 * We will fault immediately if RIP is past the segment limit / in non-canonical
1511 * territory. If we do continue, there are one or more bytes to read before we
1512 * end up in trouble and we need to do that first before faulting.
1513 */
1514 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1515 RTGCPTR GCPtrFirst;
1516 uint32_t cbMaxRead;
1517 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1518 {
1519 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1520 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1521 { /* likely */ }
1522 else
1523 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1524 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1525 }
1526 else
1527 {
1528 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1529 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1530 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1531 { /* likely */ }
1532 else
1533 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1534 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1535 if (cbMaxRead != 0)
1536 { /* likely */ }
1537 else
1538 {
1539 /* Overflowed because address is 0 and limit is max. */
1540 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1541 cbMaxRead = X86_PAGE_SIZE;
1542 }
1543 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1544 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1545 if (cbMaxRead2 < cbMaxRead)
1546 cbMaxRead = cbMaxRead2;
1547 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1548 }
1549
1550 /*
1551 * Get the TLB entry for this piece of code.
1552 */
1553 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1555 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1556 if (pTlbe->uTag == uTag)
1557 {
1558 /* likely when executing lots of code, otherwise unlikely */
1559# ifdef VBOX_WITH_STATISTICS
1560 pVCpu->iem.s.CodeTlb.cTlbHits++;
1561# endif
1562 }
1563 else
1564 {
1565 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1566# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1567 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1568 {
1569 pTlbe->uTag = uTag;
1570 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1571 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1572 pTlbe->GCPhys = NIL_RTGCPHYS;
1573 pTlbe->pbMappingR3 = NULL;
1574 }
1575 else
1576# endif
1577 {
1578 RTGCPHYS GCPhys;
1579 uint64_t fFlags;
1580 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1581 if (RT_FAILURE(rc))
1582 {
1583 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1584 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1585 }
1586
1587 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1588 pTlbe->uTag = uTag;
1589 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1590 pTlbe->GCPhys = GCPhys;
1591 pTlbe->pbMappingR3 = NULL;
1592 }
1593 }
1594
1595 /*
1596 * Check TLB page table level access flags.
1597 */
1598 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1599 {
1600 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1601 {
1602 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1603 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1604 }
1605 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1606 {
1607 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1608 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1609 }
1610 }
1611
1612# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1613 /*
1614 * Allow interpretation of patch manager code blocks since they can for
1615 * instance throw #PFs for perfectly good reasons.
1616 */
1617 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1618 { /* not unlikely */ }
1619 else
1620 {
1621 /** @todo Could optimize this a little in ring-3 if we liked. */
1622 size_t cbRead = 0;
1623 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1624 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1625 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1626 return;
1627 }
1628# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1629
1630 /*
1631 * Look up the physical page info if necessary.
1632 */
1633 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1634 { /* not necessary */ }
1635 else
1636 {
1637 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1638 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1639 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1640 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1641 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1642 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1643 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1644 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1645 }
1646
1647# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1648 /*
1649 * Try to do a direct read using the pbMappingR3 pointer.
1650 */
1651 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1652 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1653 {
1654 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1655 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1656 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1657 {
1658 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1659 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1660 }
1661 else
1662 {
1663 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1664 Assert(cbInstr < cbMaxRead);
1665 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1666 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1667 }
1668 if (cbDst <= cbMaxRead)
1669 {
1670 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1671 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1672 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1673 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1674 return;
1675 }
1676 pVCpu->iem.s.pbInstrBuf = NULL;
1677
1678 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1679 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1680 }
1681 else
1682# endif
1683#if 0
1684 /*
1685 * If there is no special read handling, we can read a bit more and
1686 * put it in the prefetch buffer.
1687 */
1688 if ( cbDst < cbMaxRead
1689 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1690 {
1691 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1692 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1693 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1694 { /* likely */ }
1695 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1696 {
1697 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1698 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1699 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1700 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1701 }
1702 else
1703 {
1704 Log((RT_SUCCESS(rcStrict)
1705 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1706 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1707 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1708 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1709 }
1710 }
1711 /*
1712 * Special read handling, so only read exactly what's needed.
1713 * This is a highly unlikely scenario.
1714 */
1715 else
1716#endif
1717 {
1718 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1719 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1720 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1721 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1722 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1723 { /* likely */ }
1724 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1725 {
1726 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1727 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1728 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1729 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1730 }
1731 else
1732 {
1733 Log((RT_SUCCESS(rcStrict)
1734 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1735 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1736 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1737 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1738 }
1739 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1740 if (cbToRead == cbDst)
1741 return;
1742 }
1743
1744 /*
1745 * More to read, loop.
1746 */
1747 cbDst -= cbMaxRead;
1748 pvDst = (uint8_t *)pvDst + cbMaxRead;
1749 }
1750}
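
/*
 * A small sketch (for illustration only) of the code TLB lookup performed above:
 * the guest-virtual page frame is combined with the TLB revision to form the tag,
 * and the low 8 bits of the tag pick one of the 256 entries.  The revision is
 * assumed to live in bits above the index, so bumping it never changes the slot,
 * it merely makes every stale tag mismatch.
 *
 *      uint64_t const     uTagSketch  = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *      PIEMTLBENTRY const pTlbeSketch = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTagSketch];
 *      bool const         fHitSketch  = pTlbeSketch->uTag == uTagSketch;
 */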
1751
1752#else
1753
1754/**
1755 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1756 * exception if it fails.
1757 *
1758 * @returns Strict VBox status code.
1759 * @param pVCpu The cross context virtual CPU structure of the
1760 * calling thread.
1761 * @param cbMin The minimum number of bytes relative to offOpcode
1762 * that must be read.
1763 */
1764IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1765{
1766 /*
1767 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1768 *
1769 * First translate CS:rIP to a physical address.
1770 */
1771 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1772 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1773 uint32_t cbToTryRead;
1774 RTGCPTR GCPtrNext;
1775 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1776 {
1777 cbToTryRead = PAGE_SIZE;
1778 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1779 if (!IEM_IS_CANONICAL(GCPtrNext))
1780 return iemRaiseGeneralProtectionFault0(pVCpu);
1781 }
1782 else
1783 {
1784 uint32_t GCPtrNext32 = pCtx->eip;
1785 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1786 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1787 if (GCPtrNext32 > pCtx->cs.u32Limit)
1788 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1789 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1790 if (!cbToTryRead) /* overflowed */
1791 {
1792 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1793 cbToTryRead = UINT32_MAX;
1794 /** @todo check out wrapping around the code segment. */
1795 }
1796 if (cbToTryRead < cbMin - cbLeft)
1797 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1798 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1799 }
1800
1801 /* Only read up to the end of the page, and make sure we don't read more
1802 than the opcode buffer can hold. */
1803 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1804 if (cbToTryRead > cbLeftOnPage)
1805 cbToTryRead = cbLeftOnPage;
1806 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1807 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1808/** @todo r=bird: Convert assertion into undefined opcode exception? */
1809 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1810
1811# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1812 /* Allow interpretation of patch manager code blocks since they can for
1813 instance throw #PFs for perfectly good reasons. */
1814 if (pVCpu->iem.s.fInPatchCode)
1815 {
1816 size_t cbRead = 0;
1817 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1818 AssertRCReturn(rc, rc);
1819 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1820 return VINF_SUCCESS;
1821 }
1822# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1823
1824 RTGCPHYS GCPhys;
1825 uint64_t fFlags;
1826 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1827 if (RT_FAILURE(rc))
1828 {
1829 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1830 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1831 }
1832 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1833 {
1834 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1835 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1836 }
1837 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1838 {
1839 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1840 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1841 }
1842 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1843 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1844 /** @todo Check reserved bits and such stuff. PGM is better at doing
1845 * that, so do it when implementing the guest virtual address
1846 * TLB... */
1847
1848 /*
1849 * Read the bytes at this address.
1850 *
1851 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1852 * and since PATM should only patch the start of an instruction there
1853 * should be no need to check again here.
1854 */
1855 if (!pVCpu->iem.s.fBypassHandlers)
1856 {
1857 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1858 cbToTryRead, PGMACCESSORIGIN_IEM);
1859 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1860 { /* likely */ }
1861 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1862 {
1863 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1864 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1865 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1866 }
1867 else
1868 {
1869 Log((RT_SUCCESS(rcStrict)
1870 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1871 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1872 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1873 return rcStrict;
1874 }
1875 }
1876 else
1877 {
1878 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1879 if (RT_SUCCESS(rc))
1880 { /* likely */ }
1881 else
1882 {
1883 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1884 return rc;
1885 }
1886 }
1887 pVCpu->iem.s.cbOpcode += cbToTryRead;
1888 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1889
1890 return VINF_SUCCESS;
1891}
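
/*
 * Worked example (illustrative, made-up numbers) of the clamping above: with a
 * flat 32-bit code segment (limit 0xffffffff, base 0) and GCPtrNext32 = 0x00010ffd,
 * the limit would allow 0xfffef003 more bytes, but only 3 bytes remain on the
 * current page (0x1000 - 0xffd), so cbToTryRead ends up as 3, further capped by
 * whatever room is left in the abOpcode buffer.
 */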
1892
1893#endif /* !IEM_WITH_CODE_TLB */
1894#ifndef IEM_WITH_SETJMP
1895
1896/**
1897 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1898 *
1899 * @returns Strict VBox status code.
1900 * @param pVCpu The cross context virtual CPU structure of the
1901 * calling thread.
1902 * @param pb Where to return the opcode byte.
1903 */
1904DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1905{
1906 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1907 if (rcStrict == VINF_SUCCESS)
1908 {
1909 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1910 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1911 pVCpu->iem.s.offOpcode = offOpcode + 1;
1912 }
1913 else
1914 *pb = 0;
1915 return rcStrict;
1916}
1917
1918
1919/**
1920 * Fetches the next opcode byte.
1921 *
1922 * @returns Strict VBox status code.
1923 * @param pVCpu The cross context virtual CPU structure of the
1924 * calling thread.
1925 * @param pu8 Where to return the opcode byte.
1926 */
1927DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1928{
1929 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1930 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1931 {
1932 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1933 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1934 return VINF_SUCCESS;
1935 }
1936 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1937}
1938
1939#else /* IEM_WITH_SETJMP */
1940
1941/**
1942 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1943 *
1944 * @returns The opcode byte.
1945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1946 */
1947DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1948{
1949# ifdef IEM_WITH_CODE_TLB
1950 uint8_t u8;
1951 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1952 return u8;
1953# else
1954 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1955 if (rcStrict == VINF_SUCCESS)
1956 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1957 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1958# endif
1959}
1960
1961
1962/**
1963 * Fetches the next opcode byte, longjmp on error.
1964 *
1965 * @returns The opcode byte.
1966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1967 */
1968DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1969{
1970# ifdef IEM_WITH_CODE_TLB
1971 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1972 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1973 if (RT_LIKELY( pbBuf != NULL
1974 && offBuf < pVCpu->iem.s.cbInstrBuf))
1975 {
1976 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1977 return pbBuf[offBuf];
1978 }
1979# else
1980 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1981 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1982 {
1983 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1984 return pVCpu->iem.s.abOpcode[offOpcode];
1985 }
1986# endif
1987 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1988}
1989
1990#endif /* IEM_WITH_SETJMP */
1991
1992/**
1993 * Fetches the next opcode byte, returns automatically on failure.
1994 *
1995 * @param a_pu8 Where to return the opcode byte.
1996 * @remark Implicitly references pVCpu.
1997 */
1998#ifndef IEM_WITH_SETJMP
1999# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2000 do \
2001 { \
2002 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2003 if (rcStrict2 == VINF_SUCCESS) \
2004 { /* likely */ } \
2005 else \
2006 return rcStrict2; \
2007 } while (0)
2008#else
2009# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2010#endif /* IEM_WITH_SETJMP */
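
/*
 * Hypothetical usage sketch (not a real instruction handler): IEM_OPCODE_GET_NEXT_U8
 * reads like a plain assignment, but in the non-setjmp build it hides an early
 * 'return rcStrict2' and in the setjmp build a possible longjmp, so it is only
 * meant for functions on the decoder call chain that return VBOXSTRICTRC.
 *
 *      IEM_STATIC VBOXSTRICTRC iemOpSketchDecodeModRmByte(PVMCPU pVCpu)
 *      {
 *          uint8_t bRm;
 *          IEM_OPCODE_GET_NEXT_U8(&bRm);   // may return / longjmp on fetch failure
 *          // ... decode bRm ...
 *          return VINF_SUCCESS;
 *      }
 */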
2011
2012
2013#ifndef IEM_WITH_SETJMP
2014/**
2015 * Fetches the next signed byte from the opcode stream.
2016 *
2017 * @returns Strict VBox status code.
2018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2019 * @param pi8 Where to return the signed byte.
2020 */
2021DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2022{
2023 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2024}
2025#endif /* !IEM_WITH_SETJMP */
2026
2027
2028/**
2029 * Fetches the next signed byte from the opcode stream, returning automatically
2030 * on failure.
2031 *
2032 * @param a_pi8 Where to return the signed byte.
2033 * @remark Implicitly references pVCpu.
2034 */
2035#ifndef IEM_WITH_SETJMP
2036# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2037 do \
2038 { \
2039 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2040 if (rcStrict2 != VINF_SUCCESS) \
2041 return rcStrict2; \
2042 } while (0)
2043#else /* IEM_WITH_SETJMP */
2044# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2045
2046#endif /* IEM_WITH_SETJMP */
2047
2048#ifndef IEM_WITH_SETJMP
2049
2050/**
2051 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2052 *
2053 * @returns Strict VBox status code.
2054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2055 * @param pu16 Where to return the opcode word.
2056 */
2057DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2058{
2059 uint8_t u8;
2060 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2061 if (rcStrict == VINF_SUCCESS)
2062 *pu16 = (int8_t)u8;
2063 return rcStrict;
2064}
2065
2066
2067/**
2068 * Fetches the next signed byte from the opcode stream, extending it to
2069 * unsigned 16-bit.
2070 *
2071 * @returns Strict VBox status code.
2072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2073 * @param pu16 Where to return the unsigned word.
2074 */
2075DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2076{
2077 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2078 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2079 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2080
2081 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2082 pVCpu->iem.s.offOpcode = offOpcode + 1;
2083 return VINF_SUCCESS;
2084}
2085
2086#endif /* !IEM_WITH_SETJMP */
2087
2088/**
2089 * Fetches the next signed byte from the opcode stream and sign-extends it to
2090 * a word, returning automatically on failure.
2091 *
2092 * @param a_pu16 Where to return the word.
2093 * @remark Implicitly references pVCpu.
2094 */
2095#ifndef IEM_WITH_SETJMP
2096# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2097 do \
2098 { \
2099 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2100 if (rcStrict2 != VINF_SUCCESS) \
2101 return rcStrict2; \
2102 } while (0)
2103#else
2104# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2105#endif
2106
2107#ifndef IEM_WITH_SETJMP
2108
2109/**
2110 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2111 *
2112 * @returns Strict VBox status code.
2113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2114 * @param pu32 Where to return the opcode dword.
2115 */
2116DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2117{
2118 uint8_t u8;
2119 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2120 if (rcStrict == VINF_SUCCESS)
2121 *pu32 = (int8_t)u8;
2122 return rcStrict;
2123}
2124
2125
2126/**
2127 * Fetches the next signed byte from the opcode stream, extending it to
2128 * unsigned 32-bit.
2129 *
2130 * @returns Strict VBox status code.
2131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2132 * @param pu32 Where to return the unsigned dword.
2133 */
2134DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2135{
2136 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2137 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2138 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2139
2140 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2141 pVCpu->iem.s.offOpcode = offOpcode + 1;
2142 return VINF_SUCCESS;
2143}
2144
2145#endif /* !IEM_WITH_SETJMP */
2146
2147/**
2148 * Fetches the next signed byte from the opcode stream and sign-extends it to
2149 * a double word, returning automatically on failure.
2150 *
2151 * @param a_pu32 Where to return the double word.
2152 * @remark Implicitly references pVCpu.
2153 */
2154#ifndef IEM_WITH_SETJMP
2155# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2156 do \
2157 { \
2158 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2159 if (rcStrict2 != VINF_SUCCESS) \
2160 return rcStrict2; \
2161 } while (0)
2162#else
2163# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2164#endif
2165
2166#ifndef IEM_WITH_SETJMP
2167
2168/**
2169 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2170 *
2171 * @returns Strict VBox status code.
2172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2173 * @param pu64 Where to return the opcode qword.
2174 */
2175DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2176{
2177 uint8_t u8;
2178 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2179 if (rcStrict == VINF_SUCCESS)
2180 *pu64 = (int8_t)u8;
2181 return rcStrict;
2182}
2183
2184
2185/**
2186 * Fetches the next signed byte from the opcode stream, extending it to
2187 * unsigned 64-bit.
2188 *
2189 * @returns Strict VBox status code.
2190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2191 * @param pu64 Where to return the unsigned qword.
2192 */
2193DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2194{
2195 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2196 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2197 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2198
2199 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2200 pVCpu->iem.s.offOpcode = offOpcode + 1;
2201 return VINF_SUCCESS;
2202}
2203
2204#endif /* !IEM_WITH_SETJMP */
2205
2206
2207/**
2208 * Fetches the next signed byte from the opcode stream and sign-extends it to
2209 * a quad word, returning automatically on failure.
2210 *
2211 * @param a_pu64 Where to return the quad word.
2212 * @remark Implicitly references pVCpu.
2213 */
2214#ifndef IEM_WITH_SETJMP
2215# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2216 do \
2217 { \
2218 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2219 if (rcStrict2 != VINF_SUCCESS) \
2220 return rcStrict2; \
2221 } while (0)
2222#else
2223# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2224#endif
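
/*
 * Quick illustration of the S8 sign extension done by the macros above (values
 * only, not code from the decoder): an opcode byte 0x90 is read as int8_t -0x70
 * and therefore widens to 0xff90, 0xffffff90 and 0xffffffffffffff90 for the
 * U16, U32 and U64 variants respectively, while 0x7f stays
 * 0x007f / 0x0000007f / 0x000000000000007f.
 */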
2225
2226
2227#ifndef IEM_WITH_SETJMP
2228
2229/**
2230 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2231 *
2232 * @returns Strict VBox status code.
2233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2234 * @param pu16 Where to return the opcode word.
2235 */
2236DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2237{
2238 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2239 if (rcStrict == VINF_SUCCESS)
2240 {
2241 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2242# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2243 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2244# else
2245 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2246# endif
2247 pVCpu->iem.s.offOpcode = offOpcode + 2;
2248 }
2249 else
2250 *pu16 = 0;
2251 return rcStrict;
2252}
2253
2254
2255/**
2256 * Fetches the next opcode word.
2257 *
2258 * @returns Strict VBox status code.
2259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2260 * @param pu16 Where to return the opcode word.
2261 */
2262DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2263{
2264 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2265 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2266 {
2267 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2268# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2269 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2270# else
2271 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2272# endif
2273 return VINF_SUCCESS;
2274 }
2275 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2276}
2277
2278#else /* IEM_WITH_SETJMP */
2279
2280/**
2281 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2282 *
2283 * @returns The opcode word.
2284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2285 */
2286DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2287{
2288# ifdef IEM_WITH_CODE_TLB
2289 uint16_t u16;
2290 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2291 return u16;
2292# else
2293 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2294 if (rcStrict == VINF_SUCCESS)
2295 {
2296 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2297 pVCpu->iem.s.offOpcode += 2;
2298# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2299 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2300# else
2301 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2302# endif
2303 }
2304 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2305# endif
2306}
2307
2308
2309/**
2310 * Fetches the next opcode word, longjmp on error.
2311 *
2312 * @returns The opcode word.
2313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2314 */
2315DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2316{
2317# ifdef IEM_WITH_CODE_TLB
2318 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2319 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2320 if (RT_LIKELY( pbBuf != NULL
2321 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2322 {
2323 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2324# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2325 return *(uint16_t const *)&pbBuf[offBuf];
2326# else
2327 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2328# endif
2329 }
2330# else
2331 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2332 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2333 {
2334 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2335# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2336 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2337# else
2338 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2339# endif
2340 }
2341# endif
2342 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2343}
2344
2345#endif /* IEM_WITH_SETJMP */
2346
2347
2348/**
2349 * Fetches the next opcode word, returns automatically on failure.
2350 *
2351 * @param a_pu16 Where to return the opcode word.
2352 * @remark Implicitly references pVCpu.
2353 */
2354#ifndef IEM_WITH_SETJMP
2355# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2356 do \
2357 { \
2358 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2359 if (rcStrict2 != VINF_SUCCESS) \
2360 return rcStrict2; \
2361 } while (0)
2362#else
2363# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2364#endif
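
/*
 * Note on byte order (illustrative): the non-unaligned paths above assemble
 * multi-byte immediates little endian, exactly as they appear in the
 * instruction stream.
 *
 *      // abOpcode[] = { 0x34, 0x12, ... }
 *      uint16_t const u16Sketch = RT_MAKE_U16(0x34, 0x12);   // == 0x1234
 */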
2365
2366#ifndef IEM_WITH_SETJMP
2367
2368/**
2369 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2370 *
2371 * @returns Strict VBox status code.
2372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2373 * @param pu32 Where to return the opcode double word.
2374 */
2375DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2376{
2377 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2378 if (rcStrict == VINF_SUCCESS)
2379 {
2380 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2381 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2382 pVCpu->iem.s.offOpcode = offOpcode + 2;
2383 }
2384 else
2385 *pu32 = 0;
2386 return rcStrict;
2387}
2388
2389
2390/**
2391 * Fetches the next opcode word, zero extending it to a double word.
2392 *
2393 * @returns Strict VBox status code.
2394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2395 * @param pu32 Where to return the opcode double word.
2396 */
2397DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2398{
2399 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2400 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2401 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2402
2403 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2404 pVCpu->iem.s.offOpcode = offOpcode + 2;
2405 return VINF_SUCCESS;
2406}
2407
2408#endif /* !IEM_WITH_SETJMP */
2409
2410
2411/**
2412 * Fetches the next opcode word and zero extends it to a double word, returns
2413 * automatically on failure.
2414 *
2415 * @param a_pu32 Where to return the opcode double word.
2416 * @remark Implicitly references pVCpu.
2417 */
2418#ifndef IEM_WITH_SETJMP
2419# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2420 do \
2421 { \
2422 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2423 if (rcStrict2 != VINF_SUCCESS) \
2424 return rcStrict2; \
2425 } while (0)
2426#else
2427# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2428#endif
2429
2430#ifndef IEM_WITH_SETJMP
2431
2432/**
2433 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2434 *
2435 * @returns Strict VBox status code.
2436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2437 * @param pu64 Where to return the opcode quad word.
2438 */
2439DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2440{
2441 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2442 if (rcStrict == VINF_SUCCESS)
2443 {
2444 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2445 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2446 pVCpu->iem.s.offOpcode = offOpcode + 2;
2447 }
2448 else
2449 *pu64 = 0;
2450 return rcStrict;
2451}
2452
2453
2454/**
2455 * Fetches the next opcode word, zero extending it to a quad word.
2456 *
2457 * @returns Strict VBox status code.
2458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2459 * @param pu64 Where to return the opcode quad word.
2460 */
2461DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2462{
2463 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2464 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2465 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2466
2467 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2468 pVCpu->iem.s.offOpcode = offOpcode + 2;
2469 return VINF_SUCCESS;
2470}
2471
2472#endif /* !IEM_WITH_SETJMP */
2473
2474/**
2475 * Fetches the next opcode word and zero extends it to a quad word, returns
2476 * automatically on failure.
2477 *
2478 * @param a_pu64 Where to return the opcode quad word.
2479 * @remark Implicitly references pVCpu.
2480 */
2481#ifndef IEM_WITH_SETJMP
2482# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2483 do \
2484 { \
2485 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2486 if (rcStrict2 != VINF_SUCCESS) \
2487 return rcStrict2; \
2488 } while (0)
2489#else
2490# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2491#endif
2492
2493
2494#ifndef IEM_WITH_SETJMP
2495/**
2496 * Fetches the next signed word from the opcode stream.
2497 *
2498 * @returns Strict VBox status code.
2499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2500 * @param pi16 Where to return the signed word.
2501 */
2502DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2503{
2504 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2505}
2506#endif /* !IEM_WITH_SETJMP */
2507
2508
2509/**
2510 * Fetches the next signed word from the opcode stream, returning automatically
2511 * on failure.
2512 *
2513 * @param a_pi16 Where to return the signed word.
2514 * @remark Implicitly references pVCpu.
2515 */
2516#ifndef IEM_WITH_SETJMP
2517# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2518 do \
2519 { \
2520 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2521 if (rcStrict2 != VINF_SUCCESS) \
2522 return rcStrict2; \
2523 } while (0)
2524#else
2525# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2526#endif
2527
2528#ifndef IEM_WITH_SETJMP
2529
2530/**
2531 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2532 *
2533 * @returns Strict VBox status code.
2534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2535 * @param pu32 Where to return the opcode dword.
2536 */
2537DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2538{
2539 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2540 if (rcStrict == VINF_SUCCESS)
2541 {
2542 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2543# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2544 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2545# else
2546 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2547 pVCpu->iem.s.abOpcode[offOpcode + 1],
2548 pVCpu->iem.s.abOpcode[offOpcode + 2],
2549 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2550# endif
2551 pVCpu->iem.s.offOpcode = offOpcode + 4;
2552 }
2553 else
2554 *pu32 = 0;
2555 return rcStrict;
2556}
2557
2558
2559/**
2560 * Fetches the next opcode dword.
2561 *
2562 * @returns Strict VBox status code.
2563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2564 * @param pu32 Where to return the opcode double word.
2565 */
2566DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2567{
2568 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2569 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2570 {
2571 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2572# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2573 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2574# else
2575 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2576 pVCpu->iem.s.abOpcode[offOpcode + 1],
2577 pVCpu->iem.s.abOpcode[offOpcode + 2],
2578 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2579# endif
2580 return VINF_SUCCESS;
2581 }
2582 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2583}
2584
2585#else /* IEM_WITH_SETJMP */
2586
2587/**
2588 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2589 *
2590 * @returns The opcode dword.
2591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2592 */
2593DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2594{
2595# ifdef IEM_WITH_CODE_TLB
2596 uint32_t u32;
2597 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2598 return u32;
2599# else
2600 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2601 if (rcStrict == VINF_SUCCESS)
2602 {
2603 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2604 pVCpu->iem.s.offOpcode = offOpcode + 4;
2605# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2606 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2607# else
2608 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2609 pVCpu->iem.s.abOpcode[offOpcode + 1],
2610 pVCpu->iem.s.abOpcode[offOpcode + 2],
2611 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2612# endif
2613 }
2614 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2615# endif
2616}
2617
2618
2619/**
2620 * Fetches the next opcode dword, longjmp on error.
2621 *
2622 * @returns The opcode dword.
2623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2624 */
2625DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2626{
2627# ifdef IEM_WITH_CODE_TLB
2628 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2629 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2630 if (RT_LIKELY( pbBuf != NULL
2631 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2632 {
2633 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2634# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2635 return *(uint32_t const *)&pbBuf[offBuf];
2636# else
2637 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2638 pbBuf[offBuf + 1],
2639 pbBuf[offBuf + 2],
2640 pbBuf[offBuf + 3]);
2641# endif
2642 }
2643# else
2644 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2645 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2646 {
2647 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2648# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2649 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2650# else
2651 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2652 pVCpu->iem.s.abOpcode[offOpcode + 1],
2653 pVCpu->iem.s.abOpcode[offOpcode + 2],
2654 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2655# endif
2656 }
2657# endif
2658 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2659}
2660
2661#endif /* IEM_WITH_SETJMP */
2662
2663
2664/**
2665 * Fetches the next opcode dword, returns automatically on failure.
2666 *
2667 * @param a_pu32 Where to return the opcode dword.
2668 * @remark Implicitly references pVCpu.
2669 */
2670#ifndef IEM_WITH_SETJMP
2671# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2672 do \
2673 { \
2674 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2675 if (rcStrict2 != VINF_SUCCESS) \
2676 return rcStrict2; \
2677 } while (0)
2678#else
2679# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2680#endif
2681
2682#ifndef IEM_WITH_SETJMP
2683
2684/**
2685 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2689 * @param pu64 Where to return the opcode dword.
2690 */
2691DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2692{
2693 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2694 if (rcStrict == VINF_SUCCESS)
2695 {
2696 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2697 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2698 pVCpu->iem.s.abOpcode[offOpcode + 1],
2699 pVCpu->iem.s.abOpcode[offOpcode + 2],
2700 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2701 pVCpu->iem.s.offOpcode = offOpcode + 4;
2702 }
2703 else
2704 *pu64 = 0;
2705 return rcStrict;
2706}
2707
2708
2709/**
2710 * Fetches the next opcode dword, zero extending it to a quad word.
2711 *
2712 * @returns Strict VBox status code.
2713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2714 * @param pu64 Where to return the opcode quad word.
2715 */
2716DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2717{
2718 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2719 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2720 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2721
2722 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2723 pVCpu->iem.s.abOpcode[offOpcode + 1],
2724 pVCpu->iem.s.abOpcode[offOpcode + 2],
2725 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2726 pVCpu->iem.s.offOpcode = offOpcode + 4;
2727 return VINF_SUCCESS;
2728}
2729
2730#endif /* !IEM_WITH_SETJMP */
2731
2732
2733/**
2734 * Fetches the next opcode dword and zero extends it to a quad word, returns
2735 * automatically on failure.
2736 *
2737 * @param a_pu64 Where to return the opcode quad word.
2738 * @remark Implicitly references pVCpu.
2739 */
2740#ifndef IEM_WITH_SETJMP
2741# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2742 do \
2743 { \
2744 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2745 if (rcStrict2 != VINF_SUCCESS) \
2746 return rcStrict2; \
2747 } while (0)
2748#else
2749# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2750#endif
2751
2752
2753#ifndef IEM_WITH_SETJMP
2754/**
2755 * Fetches the next signed double word from the opcode stream.
2756 *
2757 * @returns Strict VBox status code.
2758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2759 * @param pi32 Where to return the signed double word.
2760 */
2761DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2762{
2763 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2764}
2765#endif
2766
2767/**
2768 * Fetches the next signed double word from the opcode stream, returning
2769 * automatically on failure.
2770 *
2771 * @param a_pi32 Where to return the signed double word.
2772 * @remark Implicitly references pVCpu.
2773 */
2774#ifndef IEM_WITH_SETJMP
2775# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2776 do \
2777 { \
2778 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2779 if (rcStrict2 != VINF_SUCCESS) \
2780 return rcStrict2; \
2781 } while (0)
2782#else
2783# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2784#endif
2785
2786#ifndef IEM_WITH_SETJMP
2787
2788/**
2789 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2790 *
2791 * @returns Strict VBox status code.
2792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2793 * @param pu64 Where to return the opcode qword.
2794 */
2795DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2796{
2797 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2798 if (rcStrict == VINF_SUCCESS)
2799 {
2800 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2801 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2802 pVCpu->iem.s.abOpcode[offOpcode + 1],
2803 pVCpu->iem.s.abOpcode[offOpcode + 2],
2804 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2805 pVCpu->iem.s.offOpcode = offOpcode + 4;
2806 }
2807 else
2808 *pu64 = 0;
2809 return rcStrict;
2810}
2811
2812
2813/**
2814 * Fetches the next opcode dword, sign extending it into a quad word.
2815 *
2816 * @returns Strict VBox status code.
2817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2818 * @param pu64 Where to return the opcode quad word.
2819 */
2820DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2821{
2822 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2823 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2824 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2825
2826 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2827 pVCpu->iem.s.abOpcode[offOpcode + 1],
2828 pVCpu->iem.s.abOpcode[offOpcode + 2],
2829 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2830 *pu64 = i32;
2831 pVCpu->iem.s.offOpcode = offOpcode + 4;
2832 return VINF_SUCCESS;
2833}
2834
2835#endif /* !IEM_WITH_SETJMP */
2836
2837
2838/**
2839 * Fetches the next opcode double word and sign extends it to a quad word,
2840 * returns automatically on failure.
2841 *
2842 * @param a_pu64 Where to return the opcode quad word.
2843 * @remark Implicitly references pVCpu.
2844 */
2845#ifndef IEM_WITH_SETJMP
2846# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2847 do \
2848 { \
2849 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2850 if (rcStrict2 != VINF_SUCCESS) \
2851 return rcStrict2; \
2852 } while (0)
2853#else
2854# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2855#endif
2856
2857#ifndef IEM_WITH_SETJMP
2858
2859/**
2860 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2861 *
2862 * @returns Strict VBox status code.
2863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2864 * @param pu64 Where to return the opcode qword.
2865 */
2866DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2867{
2868 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2869 if (rcStrict == VINF_SUCCESS)
2870 {
2871 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2872# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2873 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2874# else
2875 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2876 pVCpu->iem.s.abOpcode[offOpcode + 1],
2877 pVCpu->iem.s.abOpcode[offOpcode + 2],
2878 pVCpu->iem.s.abOpcode[offOpcode + 3],
2879 pVCpu->iem.s.abOpcode[offOpcode + 4],
2880 pVCpu->iem.s.abOpcode[offOpcode + 5],
2881 pVCpu->iem.s.abOpcode[offOpcode + 6],
2882 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2883# endif
2884 pVCpu->iem.s.offOpcode = offOpcode + 8;
2885 }
2886 else
2887 *pu64 = 0;
2888 return rcStrict;
2889}
2890
2891
2892/**
2893 * Fetches the next opcode qword.
2894 *
2895 * @returns Strict VBox status code.
2896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2897 * @param pu64 Where to return the opcode qword.
2898 */
2899DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2900{
2901 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2902 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2903 {
2904# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2905 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2906# else
2907 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2908 pVCpu->iem.s.abOpcode[offOpcode + 1],
2909 pVCpu->iem.s.abOpcode[offOpcode + 2],
2910 pVCpu->iem.s.abOpcode[offOpcode + 3],
2911 pVCpu->iem.s.abOpcode[offOpcode + 4],
2912 pVCpu->iem.s.abOpcode[offOpcode + 5],
2913 pVCpu->iem.s.abOpcode[offOpcode + 6],
2914 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2915# endif
2916 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2917 return VINF_SUCCESS;
2918 }
2919 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2920}
2921
2922#else /* IEM_WITH_SETJMP */
2923
2924/**
2925 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2926 *
2927 * @returns The opcode qword.
2928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2929 */
2930DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2931{
2932# ifdef IEM_WITH_CODE_TLB
2933 uint64_t u64;
2934 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2935 return u64;
2936# else
2937 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2938 if (rcStrict == VINF_SUCCESS)
2939 {
2940 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2941 pVCpu->iem.s.offOpcode = offOpcode + 8;
2942# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2943 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2944# else
2945 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2946 pVCpu->iem.s.abOpcode[offOpcode + 1],
2947 pVCpu->iem.s.abOpcode[offOpcode + 2],
2948 pVCpu->iem.s.abOpcode[offOpcode + 3],
2949 pVCpu->iem.s.abOpcode[offOpcode + 4],
2950 pVCpu->iem.s.abOpcode[offOpcode + 5],
2951 pVCpu->iem.s.abOpcode[offOpcode + 6],
2952 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2953# endif
2954 }
2955 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2956# endif
2957}
2958
2959
2960/**
2961 * Fetches the next opcode qword, longjmp on error.
2962 *
2963 * @returns The opcode qword.
2964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2965 */
2966DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2967{
2968# ifdef IEM_WITH_CODE_TLB
2969 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2970 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2971 if (RT_LIKELY( pbBuf != NULL
2972 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2973 {
2974 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2975# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2976 return *(uint64_t const *)&pbBuf[offBuf];
2977# else
2978 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2979 pbBuf[offBuf + 1],
2980 pbBuf[offBuf + 2],
2981 pbBuf[offBuf + 3],
2982 pbBuf[offBuf + 4],
2983 pbBuf[offBuf + 5],
2984 pbBuf[offBuf + 6],
2985 pbBuf[offBuf + 7]);
2986# endif
2987 }
2988# else
2989 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2990 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2991 {
2992 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2993# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2994 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2995# else
2996 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2997 pVCpu->iem.s.abOpcode[offOpcode + 1],
2998 pVCpu->iem.s.abOpcode[offOpcode + 2],
2999 pVCpu->iem.s.abOpcode[offOpcode + 3],
3000 pVCpu->iem.s.abOpcode[offOpcode + 4],
3001 pVCpu->iem.s.abOpcode[offOpcode + 5],
3002 pVCpu->iem.s.abOpcode[offOpcode + 6],
3003 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3004# endif
3005 }
3006# endif
3007 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3008}
3009
3010#endif /* IEM_WITH_SETJMP */
3011
3012/**
3013 * Fetches the next opcode quad word, returns automatically on failure.
3014 *
3015 * @param a_pu64 Where to return the opcode quad word.
3016 * @remark Implicitly references pVCpu.
3017 */
3018#ifndef IEM_WITH_SETJMP
3019# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3020 do \
3021 { \
3022 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3023 if (rcStrict2 != VINF_SUCCESS) \
3024 return rcStrict2; \
3025 } while (0)
3026#else
3027# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3028#endif
3029
3030
3031/** @name Misc Worker Functions.
3032 * @{
3033 */
3034
3035
3036/**
3037 * Validates a new SS segment.
3038 *
3039 * @returns VBox strict status code.
3040 * @param pVCpu The cross context virtual CPU structure of the
3041 * calling thread.
3042 * @param pCtx The CPU context.
3043 * @param NewSS The new SS selector.
3044 * @param uCpl The CPL to load the stack for.
3045 * @param pDesc Where to return the descriptor.
3046 */
3047IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3048{
3049 NOREF(pCtx);
3050
3051 /* Null selectors are not allowed (we're not called for dispatching
3052 interrupts with SS=0 in long mode). */
3053 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3054 {
3055 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3056 return iemRaiseTaskSwitchFault0(pVCpu);
3057 }
3058
3059 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3060 if ((NewSS & X86_SEL_RPL) != uCpl)
3061 {
3062 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3063 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3064 }
3065
3066 /*
3067 * Read the descriptor.
3068 */
3069 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3070 if (rcStrict != VINF_SUCCESS)
3071 return rcStrict;
3072
3073 /*
3074 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3075 */
3076 if (!pDesc->Legacy.Gen.u1DescType)
3077 {
3078 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3079 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3080 }
3081
3082 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3083 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3084 {
3085 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3086 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3087 }
3088 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3089 {
3090 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3091 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3092 }
3093
3094 /* Is it there? */
3095 /** @todo testcase: Is this checked before the canonical / limit check below? */
3096 if (!pDesc->Legacy.Gen.u1Present)
3097 {
3098 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3099 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3100 }
3101
3102 return VINF_SUCCESS;
3103}
3104
3105
3106/**
3107 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3108 * not.
3109 *
3110 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3111 * @param a_pCtx The CPU context.
3112 */
3113#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3114# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3115 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3116 ? (a_pCtx)->eflags.u \
3117 : CPUMRawGetEFlags(a_pVCpu) )
3118#else
3119# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3120 ( (a_pCtx)->eflags.u )
3121#endif
3122
3123/**
3124 * Updates the EFLAGS in the correct manner wrt. PATM.
3125 *
3126 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3127 * @param a_pCtx The CPU context.
3128 * @param a_fEfl The new EFLAGS.
3129 */
3130#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3131# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3132 do { \
3133 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3134 (a_pCtx)->eflags.u = (a_fEfl); \
3135 else \
3136 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3137 } while (0)
3138#else
3139# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3140 do { \
3141 (a_pCtx)->eflags.u = (a_fEfl); \
3142 } while (0)
3143#endif
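
/*
 * Usage sketch for the two macros above (hypothetical flag fiddling, not taken
 * from a real instruction implementation): always read via IEMMISC_GET_EFL and
 * write back via IEMMISC_SET_EFL so raw-mode (PATM) builds see the right bits.
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl |= X86_EFL_CF;                            // example modification
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */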
3144
3145
3146/** @} */
3147
3148/** @name Raising Exceptions.
3149 *
3150 * @{
3151 */
3152
3153/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3154 * @{ */
3155/** CPU exception. */
3156#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3157/** External interrupt (from PIC, APIC, whatever). */
3158#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3159/** Software interrupt (int or into, not the bound instruction).
3160 * Returns to the following instruction. */
3161#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3162/** Takes an error code. */
3163#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3164/** Takes a CR2. */
3165#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3166/** Generated by the breakpoint instruction. */
3167#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3168/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3169#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3170/** @} */
3171
3172
3173/**
3174 * Loads the specified stack far pointer from the TSS.
3175 *
3176 * @returns VBox strict status code.
3177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3178 * @param pCtx The CPU context.
3179 * @param uCpl The CPL to load the stack for.
3180 * @param pSelSS Where to return the new stack segment.
3181 * @param puEsp Where to return the new stack pointer.
3182 */
3183IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3184 PRTSEL pSelSS, uint32_t *puEsp)
3185{
3186 VBOXSTRICTRC rcStrict;
3187 Assert(uCpl < 4);
3188
3189 switch (pCtx->tr.Attr.n.u4Type)
3190 {
3191 /*
3192 * 16-bit TSS (X86TSS16).
3193 */
3194 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
3195 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3196 {
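            /* In the 16-bit TSS the ring stacks follow the back link as SP:SS word
               pairs at offsets 2, 6 and 10, i.e. 4 bytes per privilege level. */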
3197 uint32_t off = uCpl * 4 + 2;
3198 if (off + 4 <= pCtx->tr.u32Limit)
3199 {
3200 /** @todo check actual access pattern here. */
3201 uint32_t u32Tmp = 0; /* gcc maybe... */
3202 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3203 if (rcStrict == VINF_SUCCESS)
3204 {
3205 *puEsp = RT_LOWORD(u32Tmp);
3206 *pSelSS = RT_HIWORD(u32Tmp);
3207 return VINF_SUCCESS;
3208 }
3209 }
3210 else
3211 {
3212 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3213 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3214 }
3215 break;
3216 }
3217
3218 /*
3219 * 32-bit TSS (X86TSS32).
3220 */
3221 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
3222 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3223 {
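            /* In the 32-bit TSS the ring stacks are ESP (dword) + SS (word, padded to
               a dword) pairs at offsets 4, 12 and 20, i.e. 8 bytes per privilege level. */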
3224 uint32_t off = uCpl * 8 + 4;
3225 if (off + 7 <= pCtx->tr.u32Limit)
3226 {
3227/** @todo check actual access pattern here. */
3228 uint64_t u64Tmp;
3229 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3230 if (rcStrict == VINF_SUCCESS)
3231 {
3232 *puEsp = u64Tmp & UINT32_MAX;
3233 *pSelSS = (RTSEL)(u64Tmp >> 32);
3234 return VINF_SUCCESS;
3235 }
3236 }
3237 else
3238 {
3239                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3240 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3241 }
3242 break;
3243 }
3244
3245 default:
3246 AssertFailed();
3247 rcStrict = VERR_IEM_IPE_4;
3248 break;
3249 }
3250
3251 *puEsp = 0; /* make gcc happy */
3252 *pSelSS = 0; /* make gcc happy */
3253 return rcStrict;
3254}
3255
3256
3257/**
3258 * Loads the specified stack pointer from the 64-bit TSS.
3259 *
3260 * @returns VBox strict status code.
3261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3262 * @param pCtx The CPU context.
3263 * @param uCpl The CPL to load the stack for.
3264 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3265 * @param puRsp Where to return the new stack pointer.
3266 */
3267IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3268{
3269 Assert(uCpl < 4);
3270 Assert(uIst < 8);
3271 *puRsp = 0; /* make gcc happy */
3272
3273 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3274
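    /* The 64-bit TSS keeps RSP0..RSP2 at offsets 4, 12 and 20 and IST1..IST7 from
       offset 0x24 onwards, each entry being a 64-bit stack pointer. */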
3275 uint32_t off;
3276 if (uIst)
3277 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3278 else
3279 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3280 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3281 {
3282 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3283 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3284 }
3285
3286 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3287}
3288
3289
3290/**
3291 * Adjust the CPU state according to the exception being raised.
3292 *
3293 * @param pCtx The CPU context.
3294 * @param u8Vector The exception that has been raised.
3295 */
3296DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3297{
3298 switch (u8Vector)
3299 {
3300 case X86_XCPT_DB:
3301 pCtx->dr[7] &= ~X86_DR7_GD;
3302 break;
3303 /** @todo Read the AMD and Intel exception reference... */
3304 }
3305}
3306
3307
3308/**
3309 * Implements exceptions and interrupts for real mode.
3310 *
3311 * @returns VBox strict status code.
3312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3313 * @param pCtx The CPU context.
3314 * @param cbInstr The number of bytes to offset rIP by in the return
3315 * address.
3316 * @param u8Vector The interrupt / exception vector number.
3317 * @param fFlags The flags.
3318 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3319 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3320 */
3321IEM_STATIC VBOXSTRICTRC
3322iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3323 PCPUMCTX pCtx,
3324 uint8_t cbInstr,
3325 uint8_t u8Vector,
3326 uint32_t fFlags,
3327 uint16_t uErr,
3328 uint64_t uCr2)
3329{
3330 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3331 NOREF(uErr); NOREF(uCr2);
3332
3333 /*
3334 * Read the IDT entry.
3335 */
3336 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3337 {
3338 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3339 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3340 }
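    /* A real-mode IDT entry is a 4 byte far pointer: 16-bit offset (IP) followed by
       the 16-bit segment (CS), which is the layout RTFAR16 maps onto. */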
3341 RTFAR16 Idte;
3342 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3343 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3344 return rcStrict;
3345
3346 /*
3347 * Push the stack frame.
3348 */
3349 uint16_t *pu16Frame;
3350 uint64_t uNewRsp;
3351 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3352 if (rcStrict != VINF_SUCCESS)
3353 return rcStrict;
3354
3355 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3356#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3357 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3358 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3359 fEfl |= UINT16_C(0xf000);
3360#endif
3361 pu16Frame[2] = (uint16_t)fEfl;
3362 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3363 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3364 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3365 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3366 return rcStrict;
3367
3368 /*
3369 * Load the vector address into cs:ip and make exception specific state
3370 * adjustments.
3371 */
3372 pCtx->cs.Sel = Idte.sel;
3373 pCtx->cs.ValidSel = Idte.sel;
3374 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3375 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3376 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3377 pCtx->rip = Idte.off;
3378 fEfl &= ~X86_EFL_IF;
3379 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3380
3381 /** @todo do we actually do this in real mode? */
3382 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3383 iemRaiseXcptAdjustState(pCtx, u8Vector);
3384
3385 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3386}
3387
3388
3389/**
3390 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3391 *
3392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3393 * @param pSReg Pointer to the segment register.
3394 */
3395IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3396{
3397 pSReg->Sel = 0;
3398 pSReg->ValidSel = 0;
3399 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3400 {
3401         /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes: */
3402 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3403 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3404 }
3405 else
3406 {
3407 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3408 /** @todo check this on AMD-V */
3409 pSReg->u64Base = 0;
3410 pSReg->u32Limit = 0;
3411 }
3412}
3413
3414
3415/**
3416 * Loads a segment selector during a task switch in V8086 mode.
3417 *
3418 * @param pSReg Pointer to the segment register.
3419 * @param uSel The selector value to load.
3420 */
3421IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3422{
3423 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3424 pSReg->Sel = uSel;
3425 pSReg->ValidSel = uSel;
3426 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3427 pSReg->u64Base = uSel << 4;
3428 pSReg->u32Limit = 0xffff;
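    /* 0xf3 = present, DPL=3, non-system, read/write accessed data segment. */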
3429 pSReg->Attr.u = 0xf3;
3430}
3431
3432
3433/**
3434 * Loads a NULL data selector into a selector register, both the hidden and
3435 * visible parts, in protected mode.
3436 *
3437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3438 * @param pSReg Pointer to the segment register.
3439 * @param uRpl The RPL.
3440 */
3441IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3442{
3443     /** @todo Testcase: write a testcase checking what happens when loading a NULL
3444 * data selector in protected mode. */
3445 pSReg->Sel = uRpl;
3446 pSReg->ValidSel = uRpl;
3447 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3448 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3449 {
3450 /* VT-x (Intel 3960x) observed doing something like this. */
3451 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3452 pSReg->u32Limit = UINT32_MAX;
3453 pSReg->u64Base = 0;
3454 }
3455 else
3456 {
3457 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3458 pSReg->u32Limit = 0;
3459 pSReg->u64Base = 0;
3460 }
3461}
3462
3463
3464/**
3465 * Loads a segment selector during a task switch in protected mode.
3466 *
3467 * In this task switch scenario, we would throw \#TS exceptions rather than
3468 * \#GPs.
3469 *
3470 * @returns VBox strict status code.
3471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3472 * @param pSReg Pointer to the segment register.
3473 * @param uSel The new selector value.
3474 *
3475 * @remarks This does _not_ handle CS or SS.
3476 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3477 */
3478IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3479{
3480 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3481
3482 /* Null data selector. */
3483 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3484 {
3485 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3486 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3487 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3488 return VINF_SUCCESS;
3489 }
3490
3491 /* Fetch the descriptor. */
3492 IEMSELDESC Desc;
3493 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3494 if (rcStrict != VINF_SUCCESS)
3495 {
3496 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3497 VBOXSTRICTRC_VAL(rcStrict)));
3498 return rcStrict;
3499 }
3500
3501 /* Must be a data segment or readable code segment. */
3502 if ( !Desc.Legacy.Gen.u1DescType
3503 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3504 {
3505 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3506 Desc.Legacy.Gen.u4Type));
3507 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3508 }
3509
3510 /* Check privileges for data segments and non-conforming code segments. */
3511 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3512 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3513 {
3514 /* The RPL and the new CPL must be less than or equal to the DPL. */
3515 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3516 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3517 {
3518 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3519 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3520 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3521 }
3522 }
3523
3524 /* Is it there? */
3525 if (!Desc.Legacy.Gen.u1Present)
3526 {
3527 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3528 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3529 }
3530
3531 /* The base and limit. */
3532 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3533 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3534
3535 /*
3536 * Ok, everything checked out fine. Now set the accessed bit before
3537 * committing the result into the registers.
3538 */
3539 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3540 {
3541 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3542 if (rcStrict != VINF_SUCCESS)
3543 return rcStrict;
3544 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3545 }
3546
3547 /* Commit */
3548 pSReg->Sel = uSel;
3549 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3550 pSReg->u32Limit = cbLimit;
3551 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3552 pSReg->ValidSel = uSel;
3553 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3554 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3555 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3556
3557 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3558 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3559 return VINF_SUCCESS;
3560}
3561
3562
3563/**
3564 * Performs a task switch.
3565 *
3566 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3567 * caller is responsible for performing the necessary checks (like DPL, TSS
3568 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3569 * reference for JMP, CALL, IRET.
3570 *
3571 * If the task switch is due to a software interrupt or hardware exception,
3572 * the caller is responsible for validating the TSS selector and descriptor. See
3573 * Intel Instruction reference for INT n.
3574 *
3575 * @returns VBox strict status code.
3576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3577 * @param pCtx The CPU context.
3578 * @param enmTaskSwitch What caused this task switch.
3579 * @param uNextEip The EIP effective after the task switch.
3580 * @param fFlags The flags.
3581 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3582 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3583 * @param SelTSS The TSS selector of the new task.
3584 * @param pNewDescTSS Pointer to the new TSS descriptor.
3585 */
3586IEM_STATIC VBOXSTRICTRC
3587iemTaskSwitch(PVMCPU pVCpu,
3588 PCPUMCTX pCtx,
3589 IEMTASKSWITCH enmTaskSwitch,
3590 uint32_t uNextEip,
3591 uint32_t fFlags,
3592 uint16_t uErr,
3593 uint64_t uCr2,
3594 RTSEL SelTSS,
3595 PIEMSELDESC pNewDescTSS)
3596{
3597 Assert(!IEM_IS_REAL_MODE(pVCpu));
3598 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3599
3600 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3601 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3602 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3603 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3604 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3605
3606 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3607 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3608
3609 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3610 fIsNewTSS386, pCtx->eip, uNextEip));
3611
3612 /* Update CR2 in case it's a page-fault. */
3613 /** @todo This should probably be done much earlier in IEM/PGM. See
3614 * @bugref{5653#c49}. */
3615 if (fFlags & IEM_XCPT_FLAGS_CR2)
3616 pCtx->cr2 = uCr2;
3617
3618 /*
3619 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3620 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3621 */
3622 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3623 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3624 if (uNewTSSLimit < uNewTSSLimitMin)
3625 {
3626 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3627 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3628 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3629 }
3630
3631 /*
3632 * Check the current TSS limit. The last written byte to the current TSS during the
3633 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3634 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3635 *
3636 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3637 * end up with smaller than "legal" TSS limits.
3638 */
3639 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3640 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3641 if (uCurTSSLimit < uCurTSSLimitMin)
3642 {
3643 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3644 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3645 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3646 }
3647
3648 /*
3649 * Verify that the new TSS can be accessed and map it. Map only the required contents
3650 * and not the entire TSS.
3651 */
3652 void *pvNewTSS;
3653 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3654 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3655 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3656 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3657 * not perform correct translation if this happens. See Intel spec. 7.2.1
3658 * "Task-State Segment" */
3659 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3660 if (rcStrict != VINF_SUCCESS)
3661 {
3662 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3663 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3664 return rcStrict;
3665 }
3666
3667 /*
3668 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3669 */
3670 uint32_t u32EFlags = pCtx->eflags.u32;
3671 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3672 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3673 {
3674 PX86DESC pDescCurTSS;
3675 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3676 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3677 if (rcStrict != VINF_SUCCESS)
3678 {
3679             Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3680 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3681 return rcStrict;
3682 }
3683
3684 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3685 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3686 if (rcStrict != VINF_SUCCESS)
3687 {
3688             Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3689 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3690 return rcStrict;
3691 }
3692
3693 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3694 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3695 {
3696 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3697 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3698 u32EFlags &= ~X86_EFL_NT;
3699 }
3700 }
3701
3702 /*
3703 * Save the CPU state into the current TSS.
3704 */
3705 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3706 if (GCPtrNewTSS == GCPtrCurTSS)
3707 {
3708 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3709 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3710 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3711 }
3712 if (fIsNewTSS386)
3713 {
3714 /*
3715 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3716 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3717 */
3718 void *pvCurTSS32;
3719 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3720 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3721 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3722 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3723 if (rcStrict != VINF_SUCCESS)
3724 {
3725 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3726 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3727 return rcStrict;
3728 }
3729
3730         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3731 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3732 pCurTSS32->eip = uNextEip;
3733 pCurTSS32->eflags = u32EFlags;
3734 pCurTSS32->eax = pCtx->eax;
3735 pCurTSS32->ecx = pCtx->ecx;
3736 pCurTSS32->edx = pCtx->edx;
3737 pCurTSS32->ebx = pCtx->ebx;
3738 pCurTSS32->esp = pCtx->esp;
3739 pCurTSS32->ebp = pCtx->ebp;
3740 pCurTSS32->esi = pCtx->esi;
3741 pCurTSS32->edi = pCtx->edi;
3742 pCurTSS32->es = pCtx->es.Sel;
3743 pCurTSS32->cs = pCtx->cs.Sel;
3744 pCurTSS32->ss = pCtx->ss.Sel;
3745 pCurTSS32->ds = pCtx->ds.Sel;
3746 pCurTSS32->fs = pCtx->fs.Sel;
3747 pCurTSS32->gs = pCtx->gs.Sel;
3748
3749 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3750 if (rcStrict != VINF_SUCCESS)
3751 {
3752 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3753 VBOXSTRICTRC_VAL(rcStrict)));
3754 return rcStrict;
3755 }
3756 }
3757 else
3758 {
3759 /*
3760 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3761 */
3762 void *pvCurTSS16;
3763 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3764 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3765 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3766 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3767 if (rcStrict != VINF_SUCCESS)
3768 {
3769 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3770 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3771 return rcStrict;
3772 }
3773
3774         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3775 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3776 pCurTSS16->ip = uNextEip;
3777 pCurTSS16->flags = u32EFlags;
3778 pCurTSS16->ax = pCtx->ax;
3779 pCurTSS16->cx = pCtx->cx;
3780 pCurTSS16->dx = pCtx->dx;
3781 pCurTSS16->bx = pCtx->bx;
3782 pCurTSS16->sp = pCtx->sp;
3783 pCurTSS16->bp = pCtx->bp;
3784 pCurTSS16->si = pCtx->si;
3785 pCurTSS16->di = pCtx->di;
3786 pCurTSS16->es = pCtx->es.Sel;
3787 pCurTSS16->cs = pCtx->cs.Sel;
3788 pCurTSS16->ss = pCtx->ss.Sel;
3789 pCurTSS16->ds = pCtx->ds.Sel;
3790
3791 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3792 if (rcStrict != VINF_SUCCESS)
3793 {
3794 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3795 VBOXSTRICTRC_VAL(rcStrict)));
3796 return rcStrict;
3797 }
3798 }
3799
3800 /*
3801 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3802 */
3803 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3804 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3805 {
3806         /* Whether it's a 16-bit or 32-bit TSS doesn't matter here; we only access the first, common 16-bit field (selPrev). */
3807 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3808 pNewTSS->selPrev = pCtx->tr.Sel;
3809 }
3810
3811 /*
3812 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3813 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3814 */
3815 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3816 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3817 bool fNewDebugTrap;
3818 if (fIsNewTSS386)
3819 {
3820 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3821 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3822 uNewEip = pNewTSS32->eip;
3823 uNewEflags = pNewTSS32->eflags;
3824 uNewEax = pNewTSS32->eax;
3825 uNewEcx = pNewTSS32->ecx;
3826 uNewEdx = pNewTSS32->edx;
3827 uNewEbx = pNewTSS32->ebx;
3828 uNewEsp = pNewTSS32->esp;
3829 uNewEbp = pNewTSS32->ebp;
3830 uNewEsi = pNewTSS32->esi;
3831 uNewEdi = pNewTSS32->edi;
3832 uNewES = pNewTSS32->es;
3833 uNewCS = pNewTSS32->cs;
3834 uNewSS = pNewTSS32->ss;
3835 uNewDS = pNewTSS32->ds;
3836 uNewFS = pNewTSS32->fs;
3837 uNewGS = pNewTSS32->gs;
3838 uNewLdt = pNewTSS32->selLdt;
3839 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3840 }
3841 else
3842 {
3843 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3844 uNewCr3 = 0;
3845 uNewEip = pNewTSS16->ip;
3846 uNewEflags = pNewTSS16->flags;
3847 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3848 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3849 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3850 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3851 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3852 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3853 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3854 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3855 uNewES = pNewTSS16->es;
3856 uNewCS = pNewTSS16->cs;
3857 uNewSS = pNewTSS16->ss;
3858 uNewDS = pNewTSS16->ds;
3859 uNewFS = 0;
3860 uNewGS = 0;
3861 uNewLdt = pNewTSS16->selLdt;
3862 fNewDebugTrap = false;
3863 }
3864
3865 if (GCPtrNewTSS == GCPtrCurTSS)
3866 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3867 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3868
3869 /*
3870 * We're done accessing the new TSS.
3871 */
3872 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3873 if (rcStrict != VINF_SUCCESS)
3874 {
3875 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3876 return rcStrict;
3877 }
3878
3879 /*
3880 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3881 */
3882 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3883 {
3884 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3885 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3886 if (rcStrict != VINF_SUCCESS)
3887 {
3888 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3889 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3890 return rcStrict;
3891 }
3892
3893 /* Check that the descriptor indicates the new TSS is available (not busy). */
3894 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3895 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3896 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3897
3898 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3899 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3900 if (rcStrict != VINF_SUCCESS)
3901 {
3902 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3903 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3904 return rcStrict;
3905 }
3906 }
3907
3908 /*
3909 * From this point on, we're technically in the new task. We will defer exceptions
3910 * until the completion of the task switch but before executing any instructions in the new task.
3911 */
3912 pCtx->tr.Sel = SelTSS;
3913 pCtx->tr.ValidSel = SelTSS;
3914 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3915 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3916 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3917 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3918 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3919
3920 /* Set the busy bit in TR. */
3921 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3922 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3923 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3924 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3925 {
3926 uNewEflags |= X86_EFL_NT;
3927 }
3928
3929 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
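    /* The CPU sets CR0.TS on every task switch; see Intel spec. 7.3 "Task Switching". */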
3930 pCtx->cr0 |= X86_CR0_TS;
3931 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3932
3933 pCtx->eip = uNewEip;
3934 pCtx->eax = uNewEax;
3935 pCtx->ecx = uNewEcx;
3936 pCtx->edx = uNewEdx;
3937 pCtx->ebx = uNewEbx;
3938 pCtx->esp = uNewEsp;
3939 pCtx->ebp = uNewEbp;
3940 pCtx->esi = uNewEsi;
3941 pCtx->edi = uNewEdi;
3942
3943 uNewEflags &= X86_EFL_LIVE_MASK;
3944 uNewEflags |= X86_EFL_RA1_MASK;
3945 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3946
3947 /*
3948 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3949 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3950 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3951 */
3952 pCtx->es.Sel = uNewES;
3953 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3954
3955 pCtx->cs.Sel = uNewCS;
3956 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3957
3958 pCtx->ss.Sel = uNewSS;
3959 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3960
3961 pCtx->ds.Sel = uNewDS;
3962 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3963
3964 pCtx->fs.Sel = uNewFS;
3965 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3966
3967 pCtx->gs.Sel = uNewGS;
3968 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3969 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3970
3971 pCtx->ldtr.Sel = uNewLdt;
3972 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3973 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3974 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3975
3976 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3977 {
3978 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3979 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3980 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3981 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3982 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3983 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3984 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3985 }
3986
3987 /*
3988 * Switch CR3 for the new task.
3989 */
3990 if ( fIsNewTSS386
3991 && (pCtx->cr0 & X86_CR0_PG))
3992 {
3993 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3994 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3995 {
3996 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3997 AssertRCSuccessReturn(rc, rc);
3998 }
3999 else
4000 pCtx->cr3 = uNewCr3;
4001
4002 /* Inform PGM. */
4003 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4004 {
4005 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4006 AssertRCReturn(rc, rc);
4007 /* ignore informational status codes */
4008 }
4009 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4010 }
4011
4012 /*
4013 * Switch LDTR for the new task.
4014 */
4015 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4016 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4017 else
4018 {
4019 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4020
4021 IEMSELDESC DescNewLdt;
4022 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4023 if (rcStrict != VINF_SUCCESS)
4024 {
4025 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4026 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4027 return rcStrict;
4028 }
4029 if ( !DescNewLdt.Legacy.Gen.u1Present
4030 || DescNewLdt.Legacy.Gen.u1DescType
4031 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4032 {
4033 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4034 uNewLdt, DescNewLdt.Legacy.u));
4035 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4036 }
4037
4038 pCtx->ldtr.ValidSel = uNewLdt;
4039 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4040 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4041 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4042 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4043 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4044 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4046 }
4047
4048 IEMSELDESC DescSS;
4049 if (IEM_IS_V86_MODE(pVCpu))
4050 {
4051 pVCpu->iem.s.uCpl = 3;
4052 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4053 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4054 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4055 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4056 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4057 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4058
4059 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4060 DescSS.Legacy.u = 0;
4061 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4062 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4063 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4064 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4065 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4066 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4067 DescSS.Legacy.Gen.u2Dpl = 3;
4068 }
4069 else
4070 {
4071 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4072
4073 /*
4074 * Load the stack segment for the new task.
4075 */
4076 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4077 {
4078 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4079 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4080 }
4081
4082 /* Fetch the descriptor. */
4083 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4084 if (rcStrict != VINF_SUCCESS)
4085 {
4086 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4087 VBOXSTRICTRC_VAL(rcStrict)));
4088 return rcStrict;
4089 }
4090
4091 /* SS must be a data segment and writable. */
4092 if ( !DescSS.Legacy.Gen.u1DescType
4093 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4094 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4095 {
4096 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4097 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4098 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4099 }
4100
4101 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4102 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4103 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4104 {
4105 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4106 uNewCpl));
4107 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4108 }
4109
4110 /* Is it there? */
4111 if (!DescSS.Legacy.Gen.u1Present)
4112 {
4113 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4114 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4115 }
4116
4117 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4118 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4119
4120 /* Set the accessed bit before committing the result into SS. */
4121 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4122 {
4123 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4124 if (rcStrict != VINF_SUCCESS)
4125 return rcStrict;
4126 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4127 }
4128
4129 /* Commit SS. */
4130 pCtx->ss.Sel = uNewSS;
4131 pCtx->ss.ValidSel = uNewSS;
4132 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4133 pCtx->ss.u32Limit = cbLimit;
4134 pCtx->ss.u64Base = u64Base;
4135 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4136 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4137
4138 /* CPL has changed, update IEM before loading rest of segments. */
4139 pVCpu->iem.s.uCpl = uNewCpl;
4140
4141 /*
4142 * Load the data segments for the new task.
4143 */
4144 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4145 if (rcStrict != VINF_SUCCESS)
4146 return rcStrict;
4147 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4148 if (rcStrict != VINF_SUCCESS)
4149 return rcStrict;
4150 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4151 if (rcStrict != VINF_SUCCESS)
4152 return rcStrict;
4153 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4154 if (rcStrict != VINF_SUCCESS)
4155 return rcStrict;
4156
4157 /*
4158 * Load the code segment for the new task.
4159 */
4160 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4161 {
4162 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4163 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4164 }
4165
4166 /* Fetch the descriptor. */
4167 IEMSELDESC DescCS;
4168 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4169 if (rcStrict != VINF_SUCCESS)
4170 {
4171 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4172 return rcStrict;
4173 }
4174
4175 /* CS must be a code segment. */
4176 if ( !DescCS.Legacy.Gen.u1DescType
4177 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4178 {
4179 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4180 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4181 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4182 }
4183
4184 /* For conforming CS, DPL must be less than or equal to the RPL. */
4185 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4186 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4187 {
4188             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4189 DescCS.Legacy.Gen.u2Dpl));
4190 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4191 }
4192
4193 /* For non-conforming CS, DPL must match RPL. */
4194 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4195 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4196 {
4197             Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4198 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4199 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4200 }
4201
4202 /* Is it there? */
4203 if (!DescCS.Legacy.Gen.u1Present)
4204 {
4205 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4206 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4207 }
4208
4209 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4210 u64Base = X86DESC_BASE(&DescCS.Legacy);
4211
4212 /* Set the accessed bit before committing the result into CS. */
4213 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4214 {
4215 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4216 if (rcStrict != VINF_SUCCESS)
4217 return rcStrict;
4218 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4219 }
4220
4221 /* Commit CS. */
4222 pCtx->cs.Sel = uNewCS;
4223 pCtx->cs.ValidSel = uNewCS;
4224 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4225 pCtx->cs.u32Limit = cbLimit;
4226 pCtx->cs.u64Base = u64Base;
4227 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4229 }
4230
4231 /** @todo Debug trap. */
4232 if (fIsNewTSS386 && fNewDebugTrap)
4233 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4234
4235 /*
4236 * Construct the error code masks based on what caused this task switch.
4237 * See Intel Instruction reference for INT.
4238 */
4239 uint16_t uExt;
4240 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4241 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4242 {
4243 uExt = 1;
4244 }
4245 else
4246 uExt = 0;
4247
4248 /*
4249 * Push any error code on to the new stack.
4250 */
4251 if (fFlags & IEM_XCPT_FLAGS_ERR)
4252 {
4253 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4254 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4255 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4256
4257 /* Check that there is sufficient space on the stack. */
4258 /** @todo Factor out segment limit checking for normal/expand down segments
4259 * into a separate function. */
4260 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4261 {
4262 if ( pCtx->esp - 1 > cbLimitSS
4263 || pCtx->esp < cbStackFrame)
4264 {
4265 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4266 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4267 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4268 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4269 }
4270 }
4271 else
4272 {
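            /* Expand-down segment: valid offsets are (limit, 0xffff] or (limit, 0xffffffff]
               depending on the D/B bit, so check both the upper bound and the limit bound. */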
4273 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4274 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4275 {
4276 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4277 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4278 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4279 }
4280 }
4281
4282
4283 if (fIsNewTSS386)
4284 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4285 else
4286 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4287 if (rcStrict != VINF_SUCCESS)
4288 {
4289 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4290 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4291 return rcStrict;
4292 }
4293 }
4294
4295 /* Check the new EIP against the new CS limit. */
4296 if (pCtx->eip > pCtx->cs.u32Limit)
4297 {
4298         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4299 pCtx->eip, pCtx->cs.u32Limit));
4300 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4301 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4302 }
4303
4304 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4305 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4306}
4307
4308
4309/**
4310 * Implements exceptions and interrupts for protected mode.
4311 *
4312 * @returns VBox strict status code.
4313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4314 * @param pCtx The CPU context.
4315 * @param cbInstr The number of bytes to offset rIP by in the return
4316 * address.
4317 * @param u8Vector The interrupt / exception vector number.
4318 * @param fFlags The flags.
4319 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4320 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4321 */
4322IEM_STATIC VBOXSTRICTRC
4323iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4324 PCPUMCTX pCtx,
4325 uint8_t cbInstr,
4326 uint8_t u8Vector,
4327 uint32_t fFlags,
4328 uint16_t uErr,
4329 uint64_t uCr2)
4330{
4331 /*
4332 * Read the IDT entry.
4333 */
4334 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4335 {
4336 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4337 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4338 }
4339 X86DESC Idte;
4340 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4341 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4342 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4343 return rcStrict;
4344 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4345 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4346 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4347
4348 /*
4349 * Check the descriptor type, DPL and such.
4350 * ASSUMES this is done in the same order as described for call-gate calls.
4351 */
4352 if (Idte.Gate.u1DescType)
4353 {
4354 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4355 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4356 }
4357 bool fTaskGate = false;
4358 uint8_t f32BitGate = true;
4359 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4360 switch (Idte.Gate.u4Type)
4361 {
4362 case X86_SEL_TYPE_SYS_UNDEFINED:
4363 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4364 case X86_SEL_TYPE_SYS_LDT:
4365 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4366 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4367 case X86_SEL_TYPE_SYS_UNDEFINED2:
4368 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4369 case X86_SEL_TYPE_SYS_UNDEFINED3:
4370 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4371 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4372 case X86_SEL_TYPE_SYS_UNDEFINED4:
4373 {
4374 /** @todo check what actually happens when the type is wrong...
4375 * esp. call gates. */
4376 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4377 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4378 }
4379
4380 case X86_SEL_TYPE_SYS_286_INT_GATE:
4381 f32BitGate = false;
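            /* fall thru */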
4382 case X86_SEL_TYPE_SYS_386_INT_GATE:
4383 fEflToClear |= X86_EFL_IF;
4384 break;
4385
4386 case X86_SEL_TYPE_SYS_TASK_GATE:
4387 fTaskGate = true;
4388#ifndef IEM_IMPLEMENTS_TASKSWITCH
4389 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4390#endif
4391 break;
4392
4393 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4394 f32BitGate = false;
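            /* fall thru */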
4395 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4396 break;
4397
4398 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4399 }
4400
4401 /* Check DPL against CPL if applicable. */
4402 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4403 {
4404 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4405 {
4406 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4407 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4408 }
4409 }
4410
4411 /* Is it there? */
4412 if (!Idte.Gate.u1Present)
4413 {
4414 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4415 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4416 }
4417
4418 /* Is it a task-gate? */
4419 if (fTaskGate)
4420 {
4421 /*
4422 * Construct the error code masks based on what caused this task switch.
4423 * See Intel Instruction reference for INT.
4424 */
4425 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4426 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4427 RTSEL SelTSS = Idte.Gate.u16Sel;
4428
4429 /*
4430 * Fetch the TSS descriptor in the GDT.
4431 */
4432 IEMSELDESC DescTSS;
4433 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4434 if (rcStrict != VINF_SUCCESS)
4435 {
4436 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4437 VBOXSTRICTRC_VAL(rcStrict)));
4438 return rcStrict;
4439 }
4440
4441 /* The TSS descriptor must be a system segment and be available (not busy). */
4442 if ( DescTSS.Legacy.Gen.u1DescType
4443 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4444 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4445 {
4446 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4447 u8Vector, SelTSS, DescTSS.Legacy.au64));
4448 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4449 }
4450
4451 /* The TSS must be present. */
4452 if (!DescTSS.Legacy.Gen.u1Present)
4453 {
4454 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4455 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4456 }
4457
4458 /* Do the actual task switch. */
4459 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4460 }
4461
4462 /* A null CS is bad. */
4463 RTSEL NewCS = Idte.Gate.u16Sel;
4464 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4465 {
4466 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4467 return iemRaiseGeneralProtectionFault0(pVCpu);
4468 }
4469
4470 /* Fetch the descriptor for the new CS. */
4471 IEMSELDESC DescCS;
4472 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4473 if (rcStrict != VINF_SUCCESS)
4474 {
4475 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4476 return rcStrict;
4477 }
4478
4479 /* Must be a code segment. */
4480 if (!DescCS.Legacy.Gen.u1DescType)
4481 {
4482 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4483 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4484 }
4485 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4486 {
4487 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4488 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4489 }
4490
4491 /* Don't allow lowering the privilege level. */
4492 /** @todo Does the lowering of privileges apply to software interrupts
4493 * only? This has bearings on the more-privileged or
4494 * same-privilege stack behavior further down. A testcase would
4495 * be nice. */
4496 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4497 {
4498 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4499 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4500 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 /* Make sure the selector is present. */
4504 if (!DescCS.Legacy.Gen.u1Present)
4505 {
4506 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4507 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4508 }
4509
4510 /* Check the new EIP against the new CS limit. */
4511 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4512 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4513 ? Idte.Gate.u16OffsetLow
4514 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4515 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4516 if (uNewEip > cbLimitCS)
4517 {
4518 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4519 u8Vector, uNewEip, cbLimitCS, NewCS));
4520 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4521 }
4522 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4523
4524 /* Calc the flag image to push. */
4525 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4526 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4527 fEfl &= ~X86_EFL_RF;
4528 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4529 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4530
4531 /* From V8086 mode only go to CPL 0. */
4532 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4533 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4534 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4535 {
4536 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4537 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4538 }
4539
4540 /*
4541 * If the privilege level changes, we need to get a new stack from the TSS.
4542 * This in turns means validating the new SS and ESP...
4543 */
4544 if (uNewCpl != pVCpu->iem.s.uCpl)
4545 {
4546 RTSEL NewSS;
4547 uint32_t uNewEsp;
4548 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4549 if (rcStrict != VINF_SUCCESS)
4550 return rcStrict;
4551
4552 IEMSELDESC DescSS;
4553 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4554 if (rcStrict != VINF_SUCCESS)
4555 return rcStrict;
4556 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4557
4558 /* Check that there is sufficient space for the stack frame. */
4559 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
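        /* Frame: [error code,] EIP, CS, EFLAGS, ESP, SS - plus GS, FS, DS, ES when
           interrupting V8086 mode; 16-bit gates push words, 32-bit gates dwords. */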
4560 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4561 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4562 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4563
4564 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4565 {
4566 if ( uNewEsp - 1 > cbLimitSS
4567 || uNewEsp < cbStackFrame)
4568 {
4569 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4570 u8Vector, NewSS, uNewEsp, cbStackFrame));
4571 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4572 }
4573 }
4574 else
4575 {
4576             if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4577 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4578 {
4579 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4580 u8Vector, NewSS, uNewEsp, cbStackFrame));
4581 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4582 }
4583 }
4584
4585 /*
4586 * Start making changes.
4587 */
4588
4589 /* Set the new CPL so that stack accesses use it. */
4590 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4591 pVCpu->iem.s.uCpl = uNewCpl;
4592
4593 /* Create the stack frame. */
4594 RTPTRUNION uStackFrame;
4595 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4596 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4597 if (rcStrict != VINF_SUCCESS)
4598 return rcStrict;
4599 void * const pvStackFrame = uStackFrame.pv;
4600 if (f32BitGate)
4601 {
4602 if (fFlags & IEM_XCPT_FLAGS_ERR)
4603 *uStackFrame.pu32++ = uErr;
4604 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4605 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4606 uStackFrame.pu32[2] = fEfl;
4607 uStackFrame.pu32[3] = pCtx->esp;
4608 uStackFrame.pu32[4] = pCtx->ss.Sel;
4609 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4610 if (fEfl & X86_EFL_VM)
4611 {
4612 uStackFrame.pu32[1] = pCtx->cs.Sel;
4613 uStackFrame.pu32[5] = pCtx->es.Sel;
4614 uStackFrame.pu32[6] = pCtx->ds.Sel;
4615 uStackFrame.pu32[7] = pCtx->fs.Sel;
4616 uStackFrame.pu32[8] = pCtx->gs.Sel;
4617 }
4618 }
4619 else
4620 {
4621 if (fFlags & IEM_XCPT_FLAGS_ERR)
4622 *uStackFrame.pu16++ = uErr;
4623 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4624 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4625 uStackFrame.pu16[2] = fEfl;
4626 uStackFrame.pu16[3] = pCtx->sp;
4627 uStackFrame.pu16[4] = pCtx->ss.Sel;
4628 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4629 if (fEfl & X86_EFL_VM)
4630 {
4631 uStackFrame.pu16[1] = pCtx->cs.Sel;
4632 uStackFrame.pu16[5] = pCtx->es.Sel;
4633 uStackFrame.pu16[6] = pCtx->ds.Sel;
4634 uStackFrame.pu16[7] = pCtx->fs.Sel;
4635 uStackFrame.pu16[8] = pCtx->gs.Sel;
4636 }
4637 }
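        /* Note the resulting layout: the error code, when present, ends up at the
           lowest address (where the new stack pointer will point), followed by
           EIP/IP, CS, (E)FLAGS, the old (E)SP and SS, with the V8086 data
           selectors ES, DS, FS and GS at the highest addresses. */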
4638 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4639 if (rcStrict != VINF_SUCCESS)
4640 return rcStrict;
4641
4642 /* Mark the selectors 'accessed' (hope this is the correct time). */
4643        /** @todo testcase: exactly _when_ are the accessed bits set - before or
4644 * after pushing the stack frame? (Write protect the gdt + stack to
4645 * find out.) */
4646 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4647 {
4648 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4649 if (rcStrict != VINF_SUCCESS)
4650 return rcStrict;
4651 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4652 }
4653
4654 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4655 {
4656 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4657 if (rcStrict != VINF_SUCCESS)
4658 return rcStrict;
4659 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4660 }
4661
4662 /*
4663         * Start committing the register changes (joins with the DPL=CPL branch).
4664 */
4665 pCtx->ss.Sel = NewSS;
4666 pCtx->ss.ValidSel = NewSS;
4667 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4668 pCtx->ss.u32Limit = cbLimitSS;
4669 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4670 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4671 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4672 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4673 * SP is loaded).
4674 * Need to check the other combinations too:
4675 * - 16-bit TSS, 32-bit handler
4676 * - 32-bit TSS, 16-bit handler */
4677 if (!pCtx->ss.Attr.n.u1DefBig)
4678 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4679 else
4680 pCtx->rsp = uNewEsp - cbStackFrame;
4681
4682 if (fEfl & X86_EFL_VM)
4683 {
4684 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4685 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4686 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4687 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4688 }
4689 }
4690 /*
4691 * Same privilege, no stack change and smaller stack frame.
4692 */
4693 else
4694 {
4695 uint64_t uNewRsp;
4696 RTPTRUNION uStackFrame;
4697 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4698 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4699 if (rcStrict != VINF_SUCCESS)
4700 return rcStrict;
4701 void * const pvStackFrame = uStackFrame.pv;
4702
4703 if (f32BitGate)
4704 {
4705 if (fFlags & IEM_XCPT_FLAGS_ERR)
4706 *uStackFrame.pu32++ = uErr;
4707 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4708 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4709 uStackFrame.pu32[2] = fEfl;
4710 }
4711 else
4712 {
4713 if (fFlags & IEM_XCPT_FLAGS_ERR)
4714 *uStackFrame.pu16++ = uErr;
4715 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4716 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4717 uStackFrame.pu16[2] = fEfl;
4718 }
4719 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4720 if (rcStrict != VINF_SUCCESS)
4721 return rcStrict;
4722
4723 /* Mark the CS selector as 'accessed'. */
4724 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4725 {
4726 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4727 if (rcStrict != VINF_SUCCESS)
4728 return rcStrict;
4729 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4730 }
4731
4732 /*
4733 * Start committing the register changes (joins with the other branch).
4734 */
4735 pCtx->rsp = uNewRsp;
4736 }
4737
4738 /* ... register committing continues. */
4739 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4740 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4741 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4742 pCtx->cs.u32Limit = cbLimitCS;
4743 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4744 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4745
4746 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4747 fEfl &= ~fEflToClear;
4748 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4749
4750 if (fFlags & IEM_XCPT_FLAGS_CR2)
4751 pCtx->cr2 = uCr2;
4752
4753 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4754 iemRaiseXcptAdjustState(pCtx, u8Vector);
4755
4756 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4757}
4758
4759
4760/**
4761 * Implements exceptions and interrupts for long mode.
4762 *
4763 * @returns VBox strict status code.
4764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4765 * @param pCtx The CPU context.
4766 * @param cbInstr The number of bytes to offset rIP by in the return
4767 * address.
4768 * @param u8Vector The interrupt / exception vector number.
4769 * @param fFlags The flags.
4770 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4771 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4772 */
4773IEM_STATIC VBOXSTRICTRC
4774iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4775 PCPUMCTX pCtx,
4776 uint8_t cbInstr,
4777 uint8_t u8Vector,
4778 uint32_t fFlags,
4779 uint16_t uErr,
4780 uint64_t uCr2)
4781{
4782 /*
4783 * Read the IDT entry.
4784 */
4785 uint16_t offIdt = (uint16_t)u8Vector << 4;
4786 if (pCtx->idtr.cbIdt < offIdt + 7)
4787 {
4788 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4789 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4790 }
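    /* Note: long mode IDT entries are 16 bytes, hence the '<< 4' above and the
       two 8 byte system reads below. */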
4791 X86DESC64 Idte;
4792 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4793 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4794 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4795 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4796 return rcStrict;
4797 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4798 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4799 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4800
4801 /*
4802 * Check the descriptor type, DPL and such.
4803 * ASSUMES this is done in the same order as described for call-gate calls.
4804 */
4805 if (Idte.Gate.u1DescType)
4806 {
4807 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4808 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4809 }
4810 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4811 switch (Idte.Gate.u4Type)
4812 {
4813 case AMD64_SEL_TYPE_SYS_INT_GATE:
4814 fEflToClear |= X86_EFL_IF;
4815 break;
4816 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4817 break;
4818
4819 default:
4820 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4821 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4822 }
4823
4824 /* Check DPL against CPL if applicable. */
4825 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4826 {
4827 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4828 {
4829 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4830 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4831 }
4832 }
4833
4834 /* Is it there? */
4835 if (!Idte.Gate.u1Present)
4836 {
4837 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4838 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4839 }
4840
4841 /* A null CS is bad. */
4842 RTSEL NewCS = Idte.Gate.u16Sel;
4843 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4844 {
4845 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4846 return iemRaiseGeneralProtectionFault0(pVCpu);
4847 }
4848
4849 /* Fetch the descriptor for the new CS. */
4850 IEMSELDESC DescCS;
4851 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4852 if (rcStrict != VINF_SUCCESS)
4853 {
4854 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4855 return rcStrict;
4856 }
4857
4858 /* Must be a 64-bit code segment. */
4859 if (!DescCS.Long.Gen.u1DescType)
4860 {
4861 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4862 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4863 }
4864 if ( !DescCS.Long.Gen.u1Long
4865 || DescCS.Long.Gen.u1DefBig
4866 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4867 {
4868 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4869 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4870 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4871 }
4872
4873 /* Don't allow lowering the privilege level. For non-conforming CS
4874 selectors, the CS.DPL sets the privilege level the trap/interrupt
4875 handler runs at. For conforming CS selectors, the CPL remains
4876 unchanged, but the CS.DPL must be <= CPL. */
4877 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4878 * when CPU in Ring-0. Result \#GP? */
4879 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4880 {
4881 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4882 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4883 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4884 }
4885
4886
4887 /* Make sure the selector is present. */
4888 if (!DescCS.Legacy.Gen.u1Present)
4889 {
4890 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4891 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4892 }
4893
4894 /* Check that the new RIP is canonical. */
4895 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4896 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4897 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4898 if (!IEM_IS_CANONICAL(uNewRip))
4899 {
4900 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4901 return iemRaiseGeneralProtectionFault0(pVCpu);
4902 }
4903
4904 /*
4905 * If the privilege level changes or if the IST isn't zero, we need to get
4906 * a new stack from the TSS.
4907 */
4908 uint64_t uNewRsp;
4909 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4910 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4911 if ( uNewCpl != pVCpu->iem.s.uCpl
4912 || Idte.Gate.u3IST != 0)
4913 {
4914 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4915 if (rcStrict != VINF_SUCCESS)
4916 return rcStrict;
4917 }
4918 else
4919 uNewRsp = pCtx->rsp;
4920 uNewRsp &= ~(uint64_t)0xf;
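    /* In long mode the CPU aligns the new stack pointer on a 16 byte boundary
       before pushing the interrupt frame; the masking above mirrors that. */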
4921
4922 /*
4923 * Calc the flag image to push.
4924 */
4925 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4926 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4927 fEfl &= ~X86_EFL_RF;
4928 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4929 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4930
4931 /*
4932 * Start making changes.
4933 */
4934 /* Set the new CPL so that stack accesses use it. */
4935 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4936 pVCpu->iem.s.uCpl = uNewCpl;
4937
4938 /* Create the stack frame. */
4939 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
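    /* Five quadwords (RIP, CS, RFLAGS, RSP, SS) plus an optional error code,
       i.e. 40 or 48 bytes. */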
4940 RTPTRUNION uStackFrame;
4941 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4942 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4943 if (rcStrict != VINF_SUCCESS)
4944 return rcStrict;
4945 void * const pvStackFrame = uStackFrame.pv;
4946
4947 if (fFlags & IEM_XCPT_FLAGS_ERR)
4948 *uStackFrame.pu64++ = uErr;
4949 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4950 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4951 uStackFrame.pu64[2] = fEfl;
4952 uStackFrame.pu64[3] = pCtx->rsp;
4953 uStackFrame.pu64[4] = pCtx->ss.Sel;
4954 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4955 if (rcStrict != VINF_SUCCESS)
4956 return rcStrict;
4957
4958 /* Mark the CS selectors 'accessed' (hope this is the correct time). */
4959    /** @todo testcase: exactly _when_ are the accessed bits set - before or
4960 * after pushing the stack frame? (Write protect the gdt + stack to
4961 * find out.) */
4962 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4963 {
4964 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4965 if (rcStrict != VINF_SUCCESS)
4966 return rcStrict;
4967 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4968 }
4969
4970 /*
4971     * Start committing the register changes.
4972 */
4973 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4974 * hidden registers when interrupting 32-bit or 16-bit code! */
4975 if (uNewCpl != uOldCpl)
4976 {
4977 pCtx->ss.Sel = 0 | uNewCpl;
4978 pCtx->ss.ValidSel = 0 | uNewCpl;
4979 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4980 pCtx->ss.u32Limit = UINT32_MAX;
4981 pCtx->ss.u64Base = 0;
4982 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4983 }
4984 pCtx->rsp = uNewRsp - cbStackFrame;
4985 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4986 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4987 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4988 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4989 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4990 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4991 pCtx->rip = uNewRip;
4992
4993 fEfl &= ~fEflToClear;
4994 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4995
4996 if (fFlags & IEM_XCPT_FLAGS_CR2)
4997 pCtx->cr2 = uCr2;
4998
4999 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5000 iemRaiseXcptAdjustState(pCtx, u8Vector);
5001
5002 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5003}
5004
5005
5006/**
5007 * Implements exceptions and interrupts.
5008 *
5009 * All exceptions and interrupts go through this function!
5010 *
5011 * @returns VBox strict status code.
5012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5013 * @param cbInstr The number of bytes to offset rIP by in the return
5014 * address.
5015 * @param u8Vector The interrupt / exception vector number.
5016 * @param fFlags The flags.
5017 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5018 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5019 */
5020DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5021iemRaiseXcptOrInt(PVMCPU pVCpu,
5022 uint8_t cbInstr,
5023 uint8_t u8Vector,
5024 uint32_t fFlags,
5025 uint16_t uErr,
5026 uint64_t uCr2)
5027{
5028 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5029#ifdef IN_RING0
5030 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5031 AssertRCReturn(rc, rc);
5032#endif
5033
5034#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5035 /*
5036 * Flush prefetch buffer
5037 */
5038 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5039#endif
5040
5041 /*
5042 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5043 */
5044 if ( pCtx->eflags.Bits.u1VM
5045 && pCtx->eflags.Bits.u2IOPL != 3
5046 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5047 && (pCtx->cr0 & X86_CR0_PE) )
5048 {
5049 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5050 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5051 u8Vector = X86_XCPT_GP;
5052 uErr = 0;
5053 }
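    /* In virtual-8086 mode the INT n instruction is IOPL sensitive: with IOPL
       below 3 it takes a #GP(0) instead of vectoring through the IDT, which is
       what the vector/flags rewrite above implements. */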
5054#ifdef DBGFTRACE_ENABLED
5055 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5056 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5057 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5058#endif
5059
5060 /*
5061 * Do recursion accounting.
5062 */
5063 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5064 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5065 if (pVCpu->iem.s.cXcptRecursions == 0)
5066 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5067 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5068 else
5069 {
5070 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5071 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5072
5073        /** @todo double and triple faults. */
5074 if (pVCpu->iem.s.cXcptRecursions >= 3)
5075 {
5076#ifdef DEBUG_bird
5077 AssertFailed();
5078#endif
5079 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5080 }
5081
5082 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5083 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5084 {
5085 ....
5086 } */
5087 }
5088 pVCpu->iem.s.cXcptRecursions++;
5089 pVCpu->iem.s.uCurXcpt = u8Vector;
5090 pVCpu->iem.s.fCurXcpt = fFlags;
5091
5092 /*
5093 * Extensive logging.
5094 */
5095#if defined(LOG_ENABLED) && defined(IN_RING3)
5096 if (LogIs3Enabled())
5097 {
5098 PVM pVM = pVCpu->CTX_SUFF(pVM);
5099 char szRegs[4096];
5100 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5101 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5102 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5103 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5104 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5105 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5106 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5107 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5108 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5109 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5110 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5111 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5112 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5113 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5114 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5115 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5116 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5117 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5118 " efer=%016VR{efer}\n"
5119 " pat=%016VR{pat}\n"
5120 " sf_mask=%016VR{sf_mask}\n"
5121 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5122 " lstar=%016VR{lstar}\n"
5123 " star=%016VR{star} cstar=%016VR{cstar}\n"
5124 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5125 );
5126
5127 char szInstr[256];
5128 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5129 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5130 szInstr, sizeof(szInstr), NULL);
5131 Log3(("%s%s\n", szRegs, szInstr));
5132 }
5133#endif /* LOG_ENABLED */
5134
5135 /*
5136 * Call the mode specific worker function.
5137 */
5138 VBOXSTRICTRC rcStrict;
5139 if (!(pCtx->cr0 & X86_CR0_PE))
5140 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5141 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5142 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5143 else
5144 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5145
5146 /* Flush the prefetch buffer. */
5147#ifdef IEM_WITH_CODE_TLB
5148 pVCpu->iem.s.pbInstrBuf = NULL;
5149#else
5150 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5151#endif
5152
5153 /*
5154 * Unwind.
5155 */
5156 pVCpu->iem.s.cXcptRecursions--;
5157 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5158 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5159 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5160 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5161 return rcStrict;
5162}
5163
5164#ifdef IEM_WITH_SETJMP
5165/**
5166 * See iemRaiseXcptOrInt. Will not return.
5167 */
5168IEM_STATIC DECL_NO_RETURN(void)
5169iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5170 uint8_t cbInstr,
5171 uint8_t u8Vector,
5172 uint32_t fFlags,
5173 uint16_t uErr,
5174 uint64_t uCr2)
5175{
5176 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5177 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5178}
5179#endif
5180
5181
5182/** \#DE - 00. */
5183DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5184{
5185 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5186}
5187
5188
5189/** \#DB - 01.
5190 * @note This automatically clears DR7.GD. */
5191DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5192{
5193 /** @todo set/clear RF. */
5194 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5196}
5197
5198
5199/** \#UD - 06. */
5200DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5201{
5202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5203}
5204
5205
5206/** \#NM - 07. */
5207DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5208{
5209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5210}
5211
5212
5213/** \#TS(err) - 0a. */
5214DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5215{
5216 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5217}
5218
5219
5220/** \#TS(tr) - 0a. */
5221DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5222{
5223 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5224 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5225}
5226
5227
5228/** \#TS(0) - 0a. */
5229DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5230{
5231 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5232 0, 0);
5233}
5234
5235
5236/** \#TS(err) - 0a. */
5237DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5238{
5239 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5240 uSel & X86_SEL_MASK_OFF_RPL, 0);
5241}
5242
5243
5244/** \#NP(err) - 0b. */
5245DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5246{
5247 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5248}
5249
5250
5251/** \#NP(seg) - 0b. */
5252DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5253{
5254 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5255 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5256}
5257
5258
5259/** \#NP(sel) - 0b. */
5260DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5261{
5262 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5263 uSel & ~X86_SEL_RPL, 0);
5264}
5265
5266
5267/** \#SS(seg) - 0c. */
5268DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5269{
5270 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5271 uSel & ~X86_SEL_RPL, 0);
5272}
5273
5274
5275/** \#SS(err) - 0c. */
5276DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5277{
5278 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5279}
5280
5281
5282/** \#GP(n) - 0d. */
5283DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5284{
5285 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5286}
5287
5288
5289/** \#GP(0) - 0d. */
5290DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5291{
5292 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5293}
5294
5295#ifdef IEM_WITH_SETJMP
5296/** \#GP(0) - 0d. */
5297DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5298{
5299 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5300}
5301#endif
5302
5303
5304/** \#GP(sel) - 0d. */
5305DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5306{
5307 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5308 Sel & ~X86_SEL_RPL, 0);
5309}
5310
5311
5312/** \#GP(0) - 0d. */
5313DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5314{
5315 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5316}
5317
5318
5319/** \#GP(sel) - 0d. */
5320DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5321{
5322 NOREF(iSegReg); NOREF(fAccess);
5323 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5324 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5325}
5326
5327#ifdef IEM_WITH_SETJMP
5328/** \#GP(sel) - 0d, longjmp. */
5329DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5330{
5331 NOREF(iSegReg); NOREF(fAccess);
5332 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5333 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5334}
5335#endif
5336
5337/** \#GP(sel) - 0d. */
5338DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5339{
5340 NOREF(Sel);
5341 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5342}
5343
5344#ifdef IEM_WITH_SETJMP
5345/** \#GP(sel) - 0d, longjmp. */
5346DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5347{
5348 NOREF(Sel);
5349 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5350}
5351#endif
5352
5353
5354/** \#GP(sel) - 0d. */
5355DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5356{
5357 NOREF(iSegReg); NOREF(fAccess);
5358 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5359}
5360
5361#ifdef IEM_WITH_SETJMP
5362/** \#GP(sel) - 0d, longjmp. */
5363DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5364 uint32_t fAccess)
5365{
5366 NOREF(iSegReg); NOREF(fAccess);
5367 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5368}
5369#endif
5370
5371
5372/** \#PF(n) - 0e. */
5373DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5374{
5375 uint16_t uErr;
5376 switch (rc)
5377 {
5378 case VERR_PAGE_NOT_PRESENT:
5379 case VERR_PAGE_TABLE_NOT_PRESENT:
5380 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5381 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5382 uErr = 0;
5383 break;
5384
5385 default:
5386 AssertMsgFailed(("%Rrc\n", rc));
5387 case VERR_ACCESS_DENIED:
5388 uErr = X86_TRAP_PF_P;
5389 break;
5390
5391 /** @todo reserved */
5392 }
5393
5394 if (pVCpu->iem.s.uCpl == 3)
5395 uErr |= X86_TRAP_PF_US;
5396
5397 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5398 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5399 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5400 uErr |= X86_TRAP_PF_ID;
5401
5402#if 0 /* This is so much non-sense, really. Why was it done like that? */
5403 /* Note! RW access callers reporting a WRITE protection fault, will clear
5404 the READ flag before calling. So, read-modify-write accesses (RW)
5405 can safely be reported as READ faults. */
5406 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5407 uErr |= X86_TRAP_PF_RW;
5408#else
5409 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5410 {
5411 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5412 uErr |= X86_TRAP_PF_RW;
5413 }
5414#endif
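    /* Example: a ring-3 write to a present, read-only page thus yields
       uErr = X86_TRAP_PF_P | X86_TRAP_PF_RW | X86_TRAP_PF_US (0x7), while a
       user-mode instruction fetch of a not-present page with PAE and NX enabled
       yields X86_TRAP_PF_US | X86_TRAP_PF_ID. */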
5415
5416 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5417 uErr, GCPtrWhere);
5418}
5419
5420#ifdef IEM_WITH_SETJMP
5421/** \#PF(n) - 0e, longjmp. */
5422IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5423{
5424 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5425}
5426#endif
5427
5428
5429/** \#MF(0) - 10. */
5430DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5431{
5432 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5433}
5434
5435
5436/** \#AC(0) - 11. */
5437DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5438{
5439 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5440}
5441
5442
5443/**
5444 * Macro for calling iemCImplRaiseDivideError().
5445 *
5446 * This enables us to add/remove arguments and force different levels of
5447 * inlining as we wish.
5448 *
5449 * @return Strict VBox status code.
5450 */
5451#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5452IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5453{
5454 NOREF(cbInstr);
5455 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5456}
5457
5458
5459/**
5460 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5461 *
5462 * This enables us to add/remove arguments and force different levels of
5463 * inlining as we wish.
5464 *
5465 * @return Strict VBox status code.
5466 */
5467#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5468IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5469{
5470 NOREF(cbInstr);
5471 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5472}
5473
5474
5475/**
5476 * Macro for calling iemCImplRaiseInvalidOpcode().
5477 *
5478 * This enables us to add/remove arguments and force different levels of
5479 * inlining as we wish.
5480 *
5481 * @return Strict VBox status code.
5482 */
5483#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5484IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5485{
5486 NOREF(cbInstr);
5487 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5488}
5489
5490
5491/** @} */
5492
5493
5494/*
5495 *
5496 * Helper routines.
5497 * Helper routines.
5498 * Helper routines.
5499 *
5500 */
5501
5502/**
5503 * Recalculates the effective operand size.
5504 *
5505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5506 */
5507IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5508{
5509 switch (pVCpu->iem.s.enmCpuMode)
5510 {
5511 case IEMMODE_16BIT:
5512 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5513 break;
5514 case IEMMODE_32BIT:
5515 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5516 break;
5517 case IEMMODE_64BIT:
5518 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5519 {
5520 case 0:
5521 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5522 break;
5523 case IEM_OP_PRF_SIZE_OP:
5524 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5525 break;
5526 case IEM_OP_PRF_SIZE_REX_W:
5527 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5528 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5529 break;
5530 }
5531 break;
5532 default:
5533 AssertFailed();
5534 }
5535}
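/* For example: in 16-bit code a 0x66 prefix selects 32-bit operands (and vice
   versa in 32-bit code), while in 64-bit code REX.W forces 64-bit operands and
   takes precedence over 0x66; a lone 0x66 selects 16-bit operands. */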
5536
5537
5538/**
5539 * Sets the default operand size to 64-bit and recalculates the effective
5540 * operand size.
5541 *
5542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5543 */
5544IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5545{
5546 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5547 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5548 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5549 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5550 else
5551 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5552}
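/* This is for instructions that default to 64-bit operands in 64-bit mode, such
   as near branches and stack pushes/pops; only a lone 0x66 prefix can override
   the size here, and then only down to 16-bit. */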
5553
5554
5555/*
5556 *
5557 * Common opcode decoders.
5558 * Common opcode decoders.
5559 * Common opcode decoders.
5560 *
5561 */
5562//#include <iprt/mem.h>
5563
5564/**
5565 * Used to add extra details about a stub case.
5566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5567 */
5568IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5569{
5570#if defined(LOG_ENABLED) && defined(IN_RING3)
5571 PVM pVM = pVCpu->CTX_SUFF(pVM);
5572 char szRegs[4096];
5573 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5574 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5575 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5576 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5577 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5578 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5579 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5580 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5581 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5582 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5583 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5584 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5585 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5586 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5587 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5588 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5589 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5590 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5591 " efer=%016VR{efer}\n"
5592 " pat=%016VR{pat}\n"
5593 " sf_mask=%016VR{sf_mask}\n"
5594 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5595 " lstar=%016VR{lstar}\n"
5596 " star=%016VR{star} cstar=%016VR{cstar}\n"
5597 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5598 );
5599
5600 char szInstr[256];
5601 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5602 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5603 szInstr, sizeof(szInstr), NULL);
5604
5605 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5606#else
5607    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5608#endif
5609}
5610
5611/**
5612 * Complains about a stub.
5613 *
5614 * Providing two versions of this macro, one for daily use and one for use when
5615 * working on IEM.
5616 */
5617#if 0
5618# define IEMOP_BITCH_ABOUT_STUB() \
5619 do { \
5620 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5621 iemOpStubMsg2(pVCpu); \
5622 RTAssertPanic(); \
5623 } while (0)
5624#else
5625# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5626#endif
5627
5628/** Stubs an opcode. */
5629#define FNIEMOP_STUB(a_Name) \
5630 FNIEMOP_DEF(a_Name) \
5631 { \
5632 RT_NOREF_PV(pVCpu); \
5633 IEMOP_BITCH_ABOUT_STUB(); \
5634 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5635 } \
5636 typedef int ignore_semicolon
5637
5638/** Stubs an opcode. */
5639#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5640 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5641 { \
5642 RT_NOREF_PV(pVCpu); \
5643 RT_NOREF_PV(a_Name0); \
5644 IEMOP_BITCH_ABOUT_STUB(); \
5645 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5646 } \
5647 typedef int ignore_semicolon
5648
5649/** Stubs an opcode which currently should raise \#UD. */
5650#define FNIEMOP_UD_STUB(a_Name) \
5651 FNIEMOP_DEF(a_Name) \
5652 { \
5653 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5654 return IEMOP_RAISE_INVALID_OPCODE(); \
5655 } \
5656 typedef int ignore_semicolon
5657
5658/** Stubs an opcode which currently should raise \#UD. */
5659#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5660 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5661 { \
5662 RT_NOREF_PV(pVCpu); \
5663 RT_NOREF_PV(a_Name0); \
5664 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5665 return IEMOP_RAISE_INVALID_OPCODE(); \
5666 } \
5667 typedef int ignore_semicolon
5668
5669
5670
5671/** @name Register Access.
5672 * @{
5673 */
5674
5675/**
5676 * Gets a reference (pointer) to the specified hidden segment register.
5677 *
5678 * @returns Hidden register reference.
5679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5680 * @param iSegReg The segment register.
5681 */
5682IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5683{
5684 Assert(iSegReg < X86_SREG_COUNT);
5685 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5686 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5687
5688#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5689 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5690 { /* likely */ }
5691 else
5692 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5693#else
5694 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5695#endif
5696 return pSReg;
5697}
5698
5699
5700/**
5701 * Ensures that the given hidden segment register is up to date.
5702 *
5703 * @returns Hidden register reference.
5704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5705 * @param pSReg The segment register.
5706 */
5707IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5708{
5709#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5710 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5711 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5712#else
5713 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5714 NOREF(pVCpu);
5715#endif
5716 return pSReg;
5717}
5718
5719
5720/**
5721 * Gets a reference (pointer) to the specified segment register (the selector
5722 * value).
5723 *
5724 * @returns Pointer to the selector variable.
5725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5726 * @param iSegReg The segment register.
5727 */
5728DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5729{
5730 Assert(iSegReg < X86_SREG_COUNT);
5731 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5732 return &pCtx->aSRegs[iSegReg].Sel;
5733}
5734
5735
5736/**
5737 * Fetches the selector value of a segment register.
5738 *
5739 * @returns The selector value.
5740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5741 * @param iSegReg The segment register.
5742 */
5743DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5744{
5745 Assert(iSegReg < X86_SREG_COUNT);
5746 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5747}
5748
5749
5750/**
5751 * Gets a reference (pointer) to the specified general purpose register.
5752 *
5753 * @returns Register reference.
5754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5755 * @param iReg The general purpose register.
5756 */
5757DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5758{
5759 Assert(iReg < 16);
5760 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5761 return &pCtx->aGRegs[iReg];
5762}
5763
5764
5765/**
5766 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5767 *
5768 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5769 *
5770 * @returns Register reference.
5771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5772 * @param iReg The register.
5773 */
5774DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5775{
5776 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5777 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5778 {
5779 Assert(iReg < 16);
5780 return &pCtx->aGRegs[iReg].u8;
5781 }
5782 /* high 8-bit register. */
5783 Assert(iReg < 8);
5784 return &pCtx->aGRegs[iReg & 3].bHi;
5785}
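/* Without a REX prefix, register encodings 4-7 select the legacy high byte
   registers AH, CH, DH and BH, i.e. bits 15:8 of RAX, RCX, RDX and RBX, which
   is what the 'iReg & 3' and bHi access above implement. */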
5786
5787
5788/**
5789 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5790 *
5791 * @returns Register reference.
5792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5793 * @param iReg The register.
5794 */
5795DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5796{
5797 Assert(iReg < 16);
5798 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5799 return &pCtx->aGRegs[iReg].u16;
5800}
5801
5802
5803/**
5804 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5805 *
5806 * @returns Register reference.
5807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5808 * @param iReg The register.
5809 */
5810DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5811{
5812 Assert(iReg < 16);
5813 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5814 return &pCtx->aGRegs[iReg].u32;
5815}
5816
5817
5818/**
5819 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5820 *
5821 * @returns Register reference.
5822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5823 * @param iReg The register.
5824 */
5825DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5826{
5827    Assert(iReg < 16);
5828 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5829 return &pCtx->aGRegs[iReg].u64;
5830}
5831
5832
5833/**
5834 * Fetches the value of an 8-bit general purpose register.
5835 *
5836 * @returns The register value.
5837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5838 * @param iReg The register.
5839 */
5840DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5841{
5842 return *iemGRegRefU8(pVCpu, iReg);
5843}
5844
5845
5846/**
5847 * Fetches the value of a 16-bit general purpose register.
5848 *
5849 * @returns The register value.
5850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5851 * @param iReg The register.
5852 */
5853DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5854{
5855 Assert(iReg < 16);
5856 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5857}
5858
5859
5860/**
5861 * Fetches the value of a 32-bit general purpose register.
5862 *
5863 * @returns The register value.
5864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5865 * @param iReg The register.
5866 */
5867DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5868{
5869 Assert(iReg < 16);
5870 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5871}
5872
5873
5874/**
5875 * Fetches the value of a 64-bit general purpose register.
5876 *
5877 * @returns The register value.
5878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5879 * @param iReg The register.
5880 */
5881DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5882{
5883 Assert(iReg < 16);
5884 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5885}
5886
5887
5888/**
5889 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5890 *
5891 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5892 * segment limit.
5893 *
5894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5895 * @param offNextInstr The offset of the next instruction.
5896 */
5897IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5898{
5899 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5900 switch (pVCpu->iem.s.enmEffOpSize)
5901 {
5902 case IEMMODE_16BIT:
5903 {
5904 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5905 if ( uNewIp > pCtx->cs.u32Limit
5906 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5907 return iemRaiseGeneralProtectionFault0(pVCpu);
5908 pCtx->rip = uNewIp;
5909 break;
5910 }
5911
5912 case IEMMODE_32BIT:
5913 {
5914 Assert(pCtx->rip <= UINT32_MAX);
5915 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5916
5917 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5918 if (uNewEip > pCtx->cs.u32Limit)
5919 return iemRaiseGeneralProtectionFault0(pVCpu);
5920 pCtx->rip = uNewEip;
5921 break;
5922 }
5923
5924 case IEMMODE_64BIT:
5925 {
5926 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5927
5928 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5929 if (!IEM_IS_CANONICAL(uNewRip))
5930 return iemRaiseGeneralProtectionFault0(pVCpu);
5931 pCtx->rip = uNewRip;
5932 break;
5933 }
5934
5935 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5936 }
5937
5938 pCtx->eflags.Bits.u1RF = 0;
5939
5940#ifndef IEM_WITH_CODE_TLB
5941 /* Flush the prefetch buffer. */
5942 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5943#endif
5944
5945 return VINF_SUCCESS;
5946}
5947
5948
5949/**
5950 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5951 *
5952 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5953 * segment limit.
5954 *
5955 * @returns Strict VBox status code.
5956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5957 * @param offNextInstr The offset of the next instruction.
5958 */
5959IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5960{
5961 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5962 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5963
5964 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5965 if ( uNewIp > pCtx->cs.u32Limit
5966 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5967 return iemRaiseGeneralProtectionFault0(pVCpu);
5968 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5969 pCtx->rip = uNewIp;
5970 pCtx->eflags.Bits.u1RF = 0;
5971
5972#ifndef IEM_WITH_CODE_TLB
5973 /* Flush the prefetch buffer. */
5974 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5975#endif
5976
5977 return VINF_SUCCESS;
5978}
5979
5980
5981/**
5982 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5983 *
5984 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5985 * segment limit.
5986 *
5987 * @returns Strict VBox status code.
5988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5989 * @param offNextInstr The offset of the next instruction.
5990 */
5991IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
5992{
5993 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5994 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
5995
5996 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
5997 {
5998 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5999
6000 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6001 if (uNewEip > pCtx->cs.u32Limit)
6002 return iemRaiseGeneralProtectionFault0(pVCpu);
6003 pCtx->rip = uNewEip;
6004 }
6005 else
6006 {
6007 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6008
6009 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6010 if (!IEM_IS_CANONICAL(uNewRip))
6011 return iemRaiseGeneralProtectionFault0(pVCpu);
6012 pCtx->rip = uNewRip;
6013 }
6014 pCtx->eflags.Bits.u1RF = 0;
6015
6016#ifndef IEM_WITH_CODE_TLB
6017 /* Flush the prefetch buffer. */
6018 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6019#endif
6020
6021 return VINF_SUCCESS;
6022}
6023
6024
6025/**
6026 * Performs a near jump to the specified address.
6027 *
6028 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6029 * segment limit.
6030 *
6031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6032 * @param uNewRip The new RIP value.
6033 */
6034IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6035{
6036 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6037 switch (pVCpu->iem.s.enmEffOpSize)
6038 {
6039 case IEMMODE_16BIT:
6040 {
6041 Assert(uNewRip <= UINT16_MAX);
6042 if ( uNewRip > pCtx->cs.u32Limit
6043 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6044 return iemRaiseGeneralProtectionFault0(pVCpu);
6045 /** @todo Test 16-bit jump in 64-bit mode. */
6046 pCtx->rip = uNewRip;
6047 break;
6048 }
6049
6050 case IEMMODE_32BIT:
6051 {
6052 Assert(uNewRip <= UINT32_MAX);
6053 Assert(pCtx->rip <= UINT32_MAX);
6054 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6055
6056 if (uNewRip > pCtx->cs.u32Limit)
6057 return iemRaiseGeneralProtectionFault0(pVCpu);
6058 pCtx->rip = uNewRip;
6059 break;
6060 }
6061
6062 case IEMMODE_64BIT:
6063 {
6064 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6065
6066 if (!IEM_IS_CANONICAL(uNewRip))
6067 return iemRaiseGeneralProtectionFault0(pVCpu);
6068 pCtx->rip = uNewRip;
6069 break;
6070 }
6071
6072 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6073 }
6074
6075 pCtx->eflags.Bits.u1RF = 0;
6076
6077#ifndef IEM_WITH_CODE_TLB
6078 /* Flush the prefetch buffer. */
6079 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6080#endif
6081
6082 return VINF_SUCCESS;
6083}
6084
6085
6086/**
6087 * Get the address of the top of the stack.
6088 *
6089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6090 * @param pCtx The CPU context from which SP/ESP/RSP should be
6091 *              read.
6092 */
6093DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6094{
6095 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6096 return pCtx->rsp;
6097 if (pCtx->ss.Attr.n.u1DefBig)
6098 return pCtx->esp;
6099 return pCtx->sp;
6100}
6101
6102
6103/**
6104 * Updates the RIP/EIP/IP to point to the next instruction.
6105 *
6106 * This function leaves the EFLAGS.RF flag alone.
6107 *
6108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6109 * @param cbInstr The number of bytes to add.
6110 */
6111IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6112{
6113 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6114 switch (pVCpu->iem.s.enmCpuMode)
6115 {
6116 case IEMMODE_16BIT:
6117 Assert(pCtx->rip <= UINT16_MAX);
6118 pCtx->eip += cbInstr;
6119 pCtx->eip &= UINT32_C(0xffff);
6120 break;
6121
6122 case IEMMODE_32BIT:
6123 pCtx->eip += cbInstr;
6124 Assert(pCtx->rip <= UINT32_MAX);
6125 break;
6126
6127 case IEMMODE_64BIT:
6128 pCtx->rip += cbInstr;
6129 break;
6130 default: AssertFailed();
6131 }
6132}
6133
6134
6135#if 0
6136/**
6137 * Updates the RIP/EIP/IP to point to the next instruction.
6138 *
6139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6140 */
6141IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6142{
6143 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6144}
6145#endif
6146
6147
6148
6149/**
6150 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6151 *
6152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6153 * @param cbInstr The number of bytes to add.
6154 */
6155IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6156{
6157 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6158
6159 pCtx->eflags.Bits.u1RF = 0;
6160
6161 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6162#if ARCH_BITS >= 64
6163 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6164 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6165 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6166#else
6167 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6168 pCtx->rip += cbInstr;
6169 else
6170 {
6171 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6172 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6173 }
6174#endif
6175}
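/* The table lookup above relies on IEMMODE_16BIT/32BIT/64BIT being 0/1/2 (see
   the AssertCompile), so the advanced RIP is masked to 16, 32 or 64 bits with a
   single branch-free expression on 64-bit hosts. */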
6176
6177
6178/**
6179 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6180 *
6181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6182 */
6183IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6184{
6185 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6186}
6187
6188
6189/**
6190 * Adds to the stack pointer.
6191 *
6192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6193 * @param pCtx The CPU context in which SP/ESP/RSP should be
6194 *              updated.
6195 * @param cbToAdd The number of bytes to add (8-bit!).
6196 */
6197DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6198{
6199 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6200 pCtx->rsp += cbToAdd;
6201 else if (pCtx->ss.Attr.n.u1DefBig)
6202 pCtx->esp += cbToAdd;
6203 else
6204 pCtx->sp += cbToAdd;
6205}
6206
6207
6208/**
6209 * Subtracts from the stack pointer.
6210 *
6211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6212 * @param pCtx The CPU context in which SP/ESP/RSP should be
6213 *              updated.
6214 * @param cbToSub The number of bytes to subtract (8-bit!).
6215 */
6216DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6217{
6218 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6219 pCtx->rsp -= cbToSub;
6220 else if (pCtx->ss.Attr.n.u1DefBig)
6221 pCtx->esp -= cbToSub;
6222 else
6223 pCtx->sp -= cbToSub;
6224}
6225
6226
6227/**
6228 * Adds to the temporary stack pointer.
6229 *
6230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6231 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6232 * @param cbToAdd The number of bytes to add (16-bit).
6233 * @param pCtx Where to get the current stack mode.
6234 */
6235DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6236{
6237 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6238 pTmpRsp->u += cbToAdd;
6239 else if (pCtx->ss.Attr.n.u1DefBig)
6240 pTmpRsp->DWords.dw0 += cbToAdd;
6241 else
6242 pTmpRsp->Words.w0 += cbToAdd;
6243}
6244
6245
6246/**
6247 * Subtracts from the temporary stack pointer.
6248 *
6249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6250 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6251 * @param cbToSub The number of bytes to subtract.
6252 * @param pCtx Where to get the current stack mode.
6253 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6254 * expecting that.
6255 */
6256DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6257{
6258 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6259 pTmpRsp->u -= cbToSub;
6260 else if (pCtx->ss.Attr.n.u1DefBig)
6261 pTmpRsp->DWords.dw0 -= cbToSub;
6262 else
6263 pTmpRsp->Words.w0 -= cbToSub;
6264}
6265
6266
6267/**
6268 * Calculates the effective stack address for a push of the specified size as
6269 * well as the new RSP value (upper bits may be masked).
6270 *
6271 * @returns Effective stack address for the push.
6272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6273 * @param pCtx Where to get the current stack mode.
6274 * @param cbItem The size of the stack item to push.
6275 * @param puNewRsp Where to return the new RSP value.
6276 */
6277DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6278{
6279 RTUINT64U uTmpRsp;
6280 RTGCPTR GCPtrTop;
6281 uTmpRsp.u = pCtx->rsp;
6282
6283 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6284 GCPtrTop = uTmpRsp.u -= cbItem;
6285 else if (pCtx->ss.Attr.n.u1DefBig)
6286 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6287 else
6288 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6289 *puNewRsp = uTmpRsp.u;
6290 return GCPtrTop;
6291}
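/*
 * Illustrative usage sketch (not part of the original file): compute the store
 * address, do the write, and commit the new RSP only afterwards.  The function
 * name iemExamplePushU32 and the iemMemStoreDataU32 store helper it calls are
 * assumptions made for illustration only.
 */
#if 0 /* sketch only */
static VBOXSTRICTRC iemExamplePushU32(PVMCPU pVCpu, uint32_t u32Value)
{
    PCPUMCTX     pCtx     = IEM_GET_CTX(pVCpu);
    uint64_t     uNewRsp;
    RTGCPTR      GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, sizeof(u32Value), &uNewRsp);
    VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, X86_SREG_SS, GCPtrTop, u32Value); /* assumed store helper */
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = uNewRsp; /* Commit the new RSP only after the write succeeded. */
    return rcStrict;
}
#endif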
6292
6293
6294/**
6295 * Gets the current stack pointer and calculates the value after a pop of the
6296 * specified size.
6297 *
6298 * @returns Current stack pointer.
6299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6300 * @param pCtx Where to get the current stack mode.
6301 * @param cbItem The size of the stack item to pop.
6302 * @param puNewRsp Where to return the new RSP value.
6303 */
6304DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6305{
6306 RTUINT64U uTmpRsp;
6307 RTGCPTR GCPtrTop;
6308 uTmpRsp.u = pCtx->rsp;
6309
6310 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6311 {
6312 GCPtrTop = uTmpRsp.u;
6313 uTmpRsp.u += cbItem;
6314 }
6315 else if (pCtx->ss.Attr.n.u1DefBig)
6316 {
6317 GCPtrTop = uTmpRsp.DWords.dw0;
6318 uTmpRsp.DWords.dw0 += cbItem;
6319 }
6320 else
6321 {
6322 GCPtrTop = uTmpRsp.Words.w0;
6323 uTmpRsp.Words.w0 += cbItem;
6324 }
6325 *puNewRsp = uTmpRsp.u;
6326 return GCPtrTop;
6327}
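/*
 * Note: the pop helper above only *calculates* the incremented RSP; the caller
 * is expected to read the value at the returned address first and commit
 * *puNewRsp to the context only if that read succeeds, mirroring the push
 * pattern sketched above.
 */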
6328
6329
6330/**
6331 * Calculates the effective stack address for a push of the specified size as
6332 * well as the new temporary RSP value (upper bits may be masked).
6333 *
6334 * @returns Effective stack address for the push.
6335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6336 * @param pCtx Where to get the current stack mode.
6337 * @param pTmpRsp The temporary stack pointer. This is updated.
6338 * @param cbItem The size of the stack item to push.
6339 */
6340DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6341{
6342 RTGCPTR GCPtrTop;
6343
6344 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6345 GCPtrTop = pTmpRsp->u -= cbItem;
6346 else if (pCtx->ss.Attr.n.u1DefBig)
6347 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6348 else
6349 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6350 return GCPtrTop;
6351}
6352
6353
6354/**
6355 * Gets the effective stack address for a pop of the specified size and
6356 * calculates and updates the temporary RSP.
6357 *
6358 * @returns Current stack pointer.
6359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6360 * @param pCtx Where to get the current stack mode.
6361 * @param pTmpRsp The temporary stack pointer. This is updated.
6362 * @param cbItem The size of the stack item to pop.
6363 */
6364DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6365{
6366 RTGCPTR GCPtrTop;
6367 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6368 {
6369 GCPtrTop = pTmpRsp->u;
6370 pTmpRsp->u += cbItem;
6371 }
6372 else if (pCtx->ss.Attr.n.u1DefBig)
6373 {
6374 GCPtrTop = pTmpRsp->DWords.dw0;
6375 pTmpRsp->DWords.dw0 += cbItem;
6376 }
6377 else
6378 {
6379 GCPtrTop = pTmpRsp->Words.w0;
6380 pTmpRsp->Words.w0 += cbItem;
6381 }
6382 return GCPtrTop;
6383}
6384
6385/** @} */
6386
6387
6388/** @name FPU access and helpers.
6389 *
6390 * @{
6391 */
6392
6393
6394/**
6395 * Hook for preparing to use the host FPU.
6396 *
6397 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6398 *
6399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6400 */
6401DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6402{
6403#ifdef IN_RING3
6404 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6405#else
6406 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6407#endif
6408}
6409
6410
6411/**
6412 * Hook for preparing to use the host FPU for SSE.
6413 *
6414 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6415 *
6416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6417 */
6418DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6419{
6420 iemFpuPrepareUsage(pVCpu);
6421}
6422
6423
6424/**
6425 * Hook for actualizing the guest FPU state before the interpreter reads it.
6426 *
6427 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6428 *
6429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6430 */
6431DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6432{
6433#ifdef IN_RING3
6434 NOREF(pVCpu);
6435#else
6436 CPUMRZFpuStateActualizeForRead(pVCpu);
6437#endif
6438}
6439
6440
6441/**
6442 * Hook for actualizing the guest FPU state before the interpreter changes it.
6443 *
6444 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6445 *
6446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6447 */
6448DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6449{
6450#ifdef IN_RING3
6451 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6452#else
6453 CPUMRZFpuStateActualizeForChange(pVCpu);
6454#endif
6455}
6456
6457
6458/**
6459 * Hook for actualizing the guest XMM0..15 register state for read only.
6460 *
6461 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6462 *
6463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6464 */
6465DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6466{
6467#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6468 NOREF(pVCpu);
6469#else
6470 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6471#endif
6472}
6473
6474
6475/**
6476 * Hook for actualizing the guest XMM0..15 register state for read+write.
6477 *
6478 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6479 *
6480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6481 */
6482DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6483{
6484#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6485 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6486#else
6487 CPUMRZFpuStateActualizeForChange(pVCpu);
6488#endif
6489}
6490
6491
6492/**
6493 * Stores a QNaN value into a FPU register.
6494 *
6495 * @param pReg Pointer to the register.
6496 */
6497DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6498{
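    /* This writes the x87 "real indefinite" QNaN: sign=1, exponent=0x7fff,
       mantissa=0xc000000000000000. */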
6499 pReg->au32[0] = UINT32_C(0x00000000);
6500 pReg->au32[1] = UINT32_C(0xc0000000);
6501 pReg->au16[4] = UINT16_C(0xffff);
6502}
6503
6504
6505/**
6506 * Updates the FOP, FPU.CS and FPUIP registers.
6507 *
6508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6509 * @param pCtx The CPU context.
6510 * @param pFpuCtx The FPU context.
6511 */
6512DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6513{
6514 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6515 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6516 /** @todo x87.CS and FPUIP need to be kept separately. */
6517 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6518 {
6519 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6520 * happens in real mode here based on the fnsave and fnstenv images. */
6521 pFpuCtx->CS = 0;
6522 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6523 }
6524 else
6525 {
6526 pFpuCtx->CS = pCtx->cs.Sel;
6527 pFpuCtx->FPUIP = pCtx->rip;
6528 }
6529}
6530
6531
6532/**
6533 * Updates the x87.DS and FPUDP registers.
6534 *
6535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6536 * @param pCtx The CPU context.
6537 * @param pFpuCtx The FPU context.
6538 * @param iEffSeg The effective segment register.
6539 * @param GCPtrEff The effective address relative to @a iEffSeg.
6540 */
6541DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6542{
6543 RTSEL sel;
6544 switch (iEffSeg)
6545 {
6546 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6547 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6548 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6549 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6550 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6551 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6552 default:
6553 AssertMsgFailed(("%d\n", iEffSeg));
6554 sel = pCtx->ds.Sel;
6555 }
6556 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6557 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6558 {
6559 pFpuCtx->DS = 0;
6560 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6561 }
6562 else
6563 {
6564 pFpuCtx->DS = sel;
6565 pFpuCtx->FPUDP = GCPtrEff;
6566 }
6567}
6568
6569
6570/**
6571 * Rotates the stack registers in the push direction.
6572 *
6573 * @param pFpuCtx The FPU context.
6574 * @remarks This is a complete waste of time, but fxsave stores the registers in
6575 * stack order.
6576 */
6577DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6578{
6579 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6580 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6581 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6582 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6583 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6584 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6585 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6586 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6587 pFpuCtx->aRegs[0].r80 = r80Tmp;
6588}
6589
6590
6591/**
6592 * Rotates the stack registers in the pop direction.
6593 *
6594 * @param pFpuCtx The FPU context.
6595 * @remarks This is a complete waste of time, but fxsave stores the registers in
6596 * stack order.
6597 */
6598DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6599{
6600 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6601 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6602 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6603 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6604 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6605 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6606 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6607 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6608 pFpuCtx->aRegs[7].r80 = r80Tmp;
6609}
6610
6611
6612/**
6613 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6614 * exception prevents it.
6615 *
6616 * @param pResult The FPU operation result to push.
6617 * @param pFpuCtx The FPU context.
6618 */
6619IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6620{
6621 /* Update FSW and bail if there are pending exceptions afterwards. */
6622 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6623 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6624 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6625 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6626 {
6627 pFpuCtx->FSW = fFsw;
6628 return;
6629 }
6630
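    /* Adding 7 modulo 8 is TOP - 1, i.e. the register slot the pushed value will land in. */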
6631 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6632 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6633 {
6634 /* All is fine, push the actual value. */
6635 pFpuCtx->FTW |= RT_BIT(iNewTop);
6636 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6637 }
6638 else if (pFpuCtx->FCW & X86_FCW_IM)
6639 {
6640 /* Masked stack overflow, push QNaN. */
6641 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6642 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6643 }
6644 else
6645 {
6646 /* Raise stack overflow, don't push anything. */
6647 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6648 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6649 return;
6650 }
6651
6652 fFsw &= ~X86_FSW_TOP_MASK;
6653 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6654 pFpuCtx->FSW = fFsw;
6655
6656 iemFpuRotateStackPush(pFpuCtx);
6657}
6658
6659
6660/**
6661 * Stores a result in a FPU register and updates the FSW and FTW.
6662 *
6663 * @param pFpuCtx The FPU context.
6664 * @param pResult The result to store.
6665 * @param iStReg Which FPU register to store it in.
6666 */
6667IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6668{
6669 Assert(iStReg < 8);
6670 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6671 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6672 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6673 pFpuCtx->FTW |= RT_BIT(iReg);
6674 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6675}
6676
6677
6678/**
6679 * Only updates the FPU status word (FSW) with the result of the current
6680 * instruction.
6681 *
6682 * @param pFpuCtx The FPU context.
6683 * @param u16FSW The FSW output of the current instruction.
6684 */
6685IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6686{
6687 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6688 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6689}
6690
6691
6692/**
6693 * Pops one item off the FPU stack if no pending exception prevents it.
6694 *
6695 * @param pFpuCtx The FPU context.
6696 */
6697IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6698{
6699 /* Check pending exceptions. */
6700 uint16_t uFSW = pFpuCtx->FSW;
6701 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6702 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6703 return;
6704
6705 /* TOP++, i.e. pop one item (adding 9 is +1 modulo 8 within the TOP field). */
6706 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6707 uFSW &= ~X86_FSW_TOP_MASK;
6708 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6709 pFpuCtx->FSW = uFSW;
6710
6711 /* Mark the previous ST0 as empty. */
6712 iOldTop >>= X86_FSW_TOP_SHIFT;
6713 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6714
6715 /* Rotate the registers. */
6716 iemFpuRotateStackPop(pFpuCtx);
6717}
6718
6719
6720/**
6721 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6722 *
6723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6724 * @param pResult The FPU operation result to push.
6725 */
6726IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6727{
6728 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6729 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6730 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6731 iemFpuMaybePushResult(pResult, pFpuCtx);
6732}
6733
6734
6735/**
6736 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6737 * and sets FPUDP and FPUDS.
6738 *
6739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6740 * @param pResult The FPU operation result to push.
6741 * @param iEffSeg The effective segment register.
6742 * @param GCPtrEff The effective address relative to @a iEffSeg.
6743 */
6744IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6745{
6746 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6747 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6748 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6749 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6750 iemFpuMaybePushResult(pResult, pFpuCtx);
6751}
6752
6753
6754/**
6755 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6756 * unless a pending exception prevents it.
6757 *
6758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6759 * @param pResult The FPU operation result to store and push.
6760 */
6761IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6762{
6763 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6764 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6765 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6766
6767 /* Update FSW and bail if there are pending exceptions afterwards. */
6768 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6769 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6770 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6771 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6772 {
6773 pFpuCtx->FSW = fFsw;
6774 return;
6775 }
6776
6777 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6778 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6779 {
6780 /* All is fine, push the actual value. */
6781 pFpuCtx->FTW |= RT_BIT(iNewTop);
6782 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6783 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6784 }
6785 else if (pFpuCtx->FCW & X86_FCW_IM)
6786 {
6787 /* Masked stack overflow, push QNaN. */
6788 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6789 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6790 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6791 }
6792 else
6793 {
6794 /* Raise stack overflow, don't push anything. */
6795 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6796 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6797 return;
6798 }
6799
6800 fFsw &= ~X86_FSW_TOP_MASK;
6801 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6802 pFpuCtx->FSW = fFsw;
6803
6804 iemFpuRotateStackPush(pFpuCtx);
6805}
6806
6807
6808/**
6809 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6810 * FOP.
6811 *
6812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6813 * @param pResult The result to store.
6814 * @param iStReg Which FPU register to store it in.
6815 */
6816IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6817{
6818 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6819 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6820 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6821 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6822}
6823
6824
6825/**
6826 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6827 * FOP, and then pops the stack.
6828 *
6829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6830 * @param pResult The result to store.
6831 * @param iStReg Which FPU register to store it in.
6832 */
6833IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6834{
6835 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6836 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6837 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6838 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6839 iemFpuMaybePopOne(pFpuCtx);
6840}
6841
6842
6843/**
6844 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6845 * FPUDP, and FPUDS.
6846 *
6847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6848 * @param pResult The result to store.
6849 * @param iStReg Which FPU register to store it in.
6850 * @param iEffSeg The effective memory operand selector register.
6851 * @param GCPtrEff The effective memory operand offset.
6852 */
6853IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6854 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6855{
6856 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6857 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6858 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6859 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6860 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6861}
6862
6863
6864/**
6865 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6866 * FPUDP, and FPUDS, and then pops the stack.
6867 *
6868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6869 * @param pResult The result to store.
6870 * @param iStReg Which FPU register to store it in.
6871 * @param iEffSeg The effective memory operand selector register.
6872 * @param GCPtrEff The effective memory operand offset.
6873 */
6874IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6875 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6876{
6877 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6878 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6879 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6880 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6881 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6882 iemFpuMaybePopOne(pFpuCtx);
6883}
6884
6885
6886/**
6887 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6888 *
6889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6890 */
6891IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6892{
6893 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6894 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6895 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6896}
6897
6898
6899/**
6900 * Marks the specified stack register as free (for FFREE).
6901 *
6902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6903 * @param iStReg The register to free.
6904 */
6905IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6906{
6907 Assert(iStReg < 8);
6908 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6909 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6910 pFpuCtx->FTW &= ~RT_BIT(iReg);
6911}
6912
6913
6914/**
6915 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6916 *
6917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6918 */
6919IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6920{
6921 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6922 uint16_t uFsw = pFpuCtx->FSW;
6923 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6924 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6925 uFsw &= ~X86_FSW_TOP_MASK;
6926 uFsw |= uTop;
6927 pFpuCtx->FSW = uFsw;
6928}
6929
6930
6931/**
6932 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6933 *
6934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6935 */
6936IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6937{
6938 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6939 uint16_t uFsw = pFpuCtx->FSW;
6940 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6941 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6942 uFsw &= ~X86_FSW_TOP_MASK;
6943 uFsw |= uTop;
6944 pFpuCtx->FSW = uFsw;
6945}
6946
6947
6948/**
6949 * Updates the FSW, FOP, FPUIP, and FPUCS.
6950 *
6951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6952 * @param u16FSW The FSW from the current instruction.
6953 */
6954IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6955{
6956 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6957 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6958 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6959 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6960}
6961
6962
6963/**
6964 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6965 *
6966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6967 * @param u16FSW The FSW from the current instruction.
6968 */
6969IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6970{
6971 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6972 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6973 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6974 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6975 iemFpuMaybePopOne(pFpuCtx);
6976}
6977
6978
6979/**
6980 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6981 *
6982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6983 * @param u16FSW The FSW from the current instruction.
6984 * @param iEffSeg The effective memory operand selector register.
6985 * @param GCPtrEff The effective memory operand offset.
6986 */
6987IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6988{
6989 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6990 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6991 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6992 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6993 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6994}
6995
6996
6997/**
6998 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
6999 *
7000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7001 * @param u16FSW The FSW from the current instruction.
7002 */
7003IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7004{
7005 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7006 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7007 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7008 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7009 iemFpuMaybePopOne(pFpuCtx);
7010 iemFpuMaybePopOne(pFpuCtx);
7011}
7012
7013
7014/**
7015 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7016 *
7017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7018 * @param u16FSW The FSW from the current instruction.
7019 * @param iEffSeg The effective memory operand selector register.
7020 * @param GCPtrEff The effective memory operand offset.
7021 */
7022IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7023{
7024 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7025 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7026 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7027 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7028 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7029 iemFpuMaybePopOne(pFpuCtx);
7030}
7031
7032
7033/**
7034 * Worker routine for raising an FPU stack underflow exception.
7035 *
7036 * @param pFpuCtx The FPU context.
7037 * @param iStReg The stack register being accessed.
7038 */
7039IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7040{
7041 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7042 if (pFpuCtx->FCW & X86_FCW_IM)
7043 {
7044 /* Masked underflow. */
7045 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7046 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7047 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7048 if (iStReg != UINT8_MAX)
7049 {
7050 pFpuCtx->FTW |= RT_BIT(iReg);
7051 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7052 }
7053 }
7054 else
7055 {
7056 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7057 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7058 }
7059}
7060
7061
7062/**
7063 * Raises a FPU stack underflow exception.
7064 *
7065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7066 * @param iStReg The destination register that should be loaded
7067 * with QNaN if \#IS is not masked. Specify
7068 * UINT8_MAX if none (like for fcom).
7069 */
7070DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7071{
7072 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7073 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7074 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7075 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7076}
7077
7078
7079DECL_NO_INLINE(IEM_STATIC, void)
7080iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7081{
7082 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7083 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7084 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7085 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7086 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7087}
7088
7089
7090DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7091{
7092 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7093 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7094 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7095 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7096 iemFpuMaybePopOne(pFpuCtx);
7097}
7098
7099
7100DECL_NO_INLINE(IEM_STATIC, void)
7101iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7102{
7103 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7104 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7105 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7106 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7107 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7108 iemFpuMaybePopOne(pFpuCtx);
7109}
7110
7111
7112DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7113{
7114 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7115 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7116 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7117 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7118 iemFpuMaybePopOne(pFpuCtx);
7119 iemFpuMaybePopOne(pFpuCtx);
7120}
7121
7122
7123DECL_NO_INLINE(IEM_STATIC, void)
7124iemFpuStackPushUnderflow(PVMCPU pVCpu)
7125{
7126 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7127 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7128 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7129
7130 if (pFpuCtx->FCW & X86_FCW_IM)
7131 {
7132 /* Masked underflow - Push QNaN. */
7133 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7134 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7135 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7136 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7137 pFpuCtx->FTW |= RT_BIT(iNewTop);
7138 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7139 iemFpuRotateStackPush(pFpuCtx);
7140 }
7141 else
7142 {
7143 /* Exception pending - don't change TOP or the register stack. */
7144 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7145 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7146 }
7147}
7148
7149
7150DECL_NO_INLINE(IEM_STATIC, void)
7151iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7152{
7153 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7154 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7155 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7156
7157 if (pFpuCtx->FCW & X86_FCW_IM)
7158 {
7159 /* Masked underflow - Push QNaN. */
7160 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7161 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7162 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7163 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7164 pFpuCtx->FTW |= RT_BIT(iNewTop);
7165 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7166 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7167 iemFpuRotateStackPush(pFpuCtx);
7168 }
7169 else
7170 {
7171 /* Exception pending - don't change TOP or the register stack. */
7172 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7173 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7174 }
7175}
7176
7177
7178/**
7179 * Worker routine for raising an FPU stack overflow exception on a push.
7180 *
7181 * @param pFpuCtx The FPU context.
7182 */
7183IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7184{
7185 if (pFpuCtx->FCW & X86_FCW_IM)
7186 {
7187 /* Masked overflow. */
7188 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7189 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7190 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7191 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7192 pFpuCtx->FTW |= RT_BIT(iNewTop);
7193 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7194 iemFpuRotateStackPush(pFpuCtx);
7195 }
7196 else
7197 {
7198 /* Exception pending - don't change TOP or the register stack. */
7199 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7200 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7201 }
7202}
7203
7204
7205/**
7206 * Raises a FPU stack overflow exception on a push.
7207 *
7208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7209 */
7210DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7211{
7212 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7213 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7214 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7215 iemFpuStackPushOverflowOnly(pFpuCtx);
7216}
7217
7218
7219/**
7220 * Raises a FPU stack overflow exception on a push with a memory operand.
7221 *
7222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7223 * @param iEffSeg The effective memory operand selector register.
7224 * @param GCPtrEff The effective memory operand offset.
7225 */
7226DECL_NO_INLINE(IEM_STATIC, void)
7227iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7228{
7229 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7230 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7231 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7232 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7233 iemFpuStackPushOverflowOnly(pFpuCtx);
7234}
7235
7236
7237IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7238{
7239 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7240 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7241 if (pFpuCtx->FTW & RT_BIT(iReg))
7242 return VINF_SUCCESS;
7243 return VERR_NOT_FOUND;
7244}
7245
7246
7247IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7248{
7249 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7250 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7251 if (pFpuCtx->FTW & RT_BIT(iReg))
7252 {
7253 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7254 return VINF_SUCCESS;
7255 }
7256 return VERR_NOT_FOUND;
7257}
7258
7259
7260IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7261 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7262{
7263 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7264 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7265 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7266 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7267 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7268 {
7269 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7270 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7271 return VINF_SUCCESS;
7272 }
7273 return VERR_NOT_FOUND;
7274}
7275
7276
7277IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7278{
7279 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7280 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7281 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7282 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7283 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7284 {
7285 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7286 return VINF_SUCCESS;
7287 }
7288 return VERR_NOT_FOUND;
7289}
7290
7291
7292/**
7293 * Updates the FPU exception status after FCW is changed.
7294 *
7295 * @param pFpuCtx The FPU context.
7296 */
7297IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7298{
7299 uint16_t u16Fsw = pFpuCtx->FSW;
7300 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7301 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7302 else
7303 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7304 pFpuCtx->FSW = u16Fsw;
7305}
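/*
 * Note: the helper above implements the usual x87 summary rule - ES (error
 * summary) and B (busy) are set whenever an exception flag in FSW is unmasked
 * by the corresponding FCW mask bit (e.g. FSW.IE=1 with FCW.IM=0), and cleared
 * otherwise.
 */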
7306
7307
7308/**
7309 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7310 *
7311 * @returns The full FTW.
7312 * @param pFpuCtx The FPU context.
7313 */
7314IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7315{
7316 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7317 uint16_t u16Ftw = 0;
7318 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7319 for (unsigned iSt = 0; iSt < 8; iSt++)
7320 {
7321 unsigned const iReg = (iSt + iTop) & 7;
7322 if (!(u8Ftw & RT_BIT(iReg)))
7323 u16Ftw |= 3 << (iReg * 2); /* empty */
7324 else
7325 {
7326 uint16_t uTag;
7327 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7328 if (pr80Reg->s.uExponent == 0x7fff)
7329 uTag = 2; /* Exponent is all 1's => Special. */
7330 else if (pr80Reg->s.uExponent == 0x0000)
7331 {
7332 if (pr80Reg->s.u64Mantissa == 0x0000)
7333 uTag = 1; /* All bits are zero => Zero. */
7334 else
7335 uTag = 2; /* Must be special. */
7336 }
7337 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7338 uTag = 0; /* Valid. */
7339 else
7340 uTag = 2; /* Must be special. */
7341
7342 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7343 }
7344 }
7345
7346 return u16Ftw;
7347}
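/*
 * Note: the two-bit tags produced above follow the standard x87 encoding:
 * 0 = valid, 1 = zero, 2 = special (NaN, infinity, denormal, unnormal),
 * 3 = empty.  An entirely empty stack thus yields a full FTW of 0xffff, while
 * eight registers holding normal values yield 0x0000.
 */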
7348
7349
7350/**
7351 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7352 *
7353 * @returns The compressed FTW.
7354 * @param u16FullFtw The full FTW to convert.
7355 */
7356IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7357{
7358 uint8_t u8Ftw = 0;
7359 for (unsigned i = 0; i < 8; i++)
7360 {
7361 if ((u16FullFtw & 3) != 3 /*empty*/)
7362 u8Ftw |= RT_BIT(i);
7363 u16FullFtw >>= 2;
7364 }
7365
7366 return u8Ftw;
7367}
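/*
 * Note: this is the inverse of iemFpuCalcFullFtw - each two-bit tag collapses
 * to a single "not empty" bit, so 0xffff (all empty) compresses to 0x00 and
 * 0x0000 (all valid) to 0xff.
 */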
7368
7369/** @} */
7370
7371
7372/** @name Memory access.
7373 *
7374 * @{
7375 */
7376
7377
7378/**
7379 * Updates the IEMCPU::cbWritten counter if applicable.
7380 *
7381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7382 * @param fAccess The access being accounted for.
7383 * @param cbMem The access size.
7384 */
7385DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7386{
7387 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7388 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7389 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7390}
7391
7392
7393/**
7394 * Checks if the given segment can be written to, raising the appropriate
7395 * exception if not.
7396 *
7397 * @returns VBox strict status code.
7398 *
7399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7400 * @param pHid Pointer to the hidden register.
7401 * @param iSegReg The register number.
7402 * @param pu64BaseAddr Where to return the base address to use for the
7403 * segment. (In 64-bit code it may differ from the
7404 * base in the hidden segment.)
7405 */
7406IEM_STATIC VBOXSTRICTRC
7407iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7408{
7409 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7410 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7411 else
7412 {
7413 if (!pHid->Attr.n.u1Present)
7414 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7415
7416 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7417 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7418 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7419 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7420 *pu64BaseAddr = pHid->u64Base;
7421 }
7422 return VINF_SUCCESS;
7423}
7424
7425
7426/**
7427 * Checks if the given segment can be read from, raising the appropriate
7428 * exception if not.
7429 *
7430 * @returns VBox strict status code.
7431 *
7432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7433 * @param pHid Pointer to the hidden register.
7434 * @param iSegReg The register number.
7435 * @param pu64BaseAddr Where to return the base address to use for the
7436 * segment. (In 64-bit code it may differ from the
7437 * base in the hidden segment.)
7438 */
7439IEM_STATIC VBOXSTRICTRC
7440iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7441{
7442 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7443 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7444 else
7445 {
7446 if (!pHid->Attr.n.u1Present)
7447 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7448
7449 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7450 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7451 *pu64BaseAddr = pHid->u64Base;
7452 }
7453 return VINF_SUCCESS;
7454}
7455
7456
7457/**
7458 * Applies the segment limit, base and attributes.
7459 *
7460 * This may raise a \#GP or \#SS.
7461 *
7462 * @returns VBox strict status code.
7463 *
7464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7465 * @param fAccess The kind of access which is being performed.
7466 * @param iSegReg The index of the segment register to apply.
7467 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7468 * TSS, ++).
7469 * @param cbMem The access size.
7470 * @param pGCPtrMem Pointer to the guest memory address to apply
7471 * segmentation to. Input and output parameter.
7472 */
7473IEM_STATIC VBOXSTRICTRC
7474iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7475{
7476 if (iSegReg == UINT8_MAX)
7477 return VINF_SUCCESS;
7478
7479 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7480 switch (pVCpu->iem.s.enmCpuMode)
7481 {
7482 case IEMMODE_16BIT:
7483 case IEMMODE_32BIT:
7484 {
7485 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7486 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7487
7488 if ( pSel->Attr.n.u1Present
7489 && !pSel->Attr.n.u1Unusable)
7490 {
7491 Assert(pSel->Attr.n.u1DescType);
7492 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7493 {
7494 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7495 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7496 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7497
7498 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7499 {
7500 /** @todo CPL check. */
7501 }
7502
7503 /*
7504 * There are two kinds of data selectors, normal and expand down.
7505 */
7506 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7507 {
7508 if ( GCPtrFirst32 > pSel->u32Limit
7509 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7510 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7511 }
7512 else
7513 {
7514 /*
7515 * The upper boundary is defined by the B bit, not the G bit!
7516 */
7517 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7518 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7519 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7520 }
7521 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7522 }
7523 else
7524 {
7525
7526 /*
7527 * A code selector can usually be used to read through; writing is
7528 * only permitted in real and V8086 mode.
7529 */
7530 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7531 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7532 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7533 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7534 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7535
7536 if ( GCPtrFirst32 > pSel->u32Limit
7537 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7538 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7539
7540 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7541 {
7542 /** @todo CPL check. */
7543 }
7544
7545 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7546 }
7547 }
7548 else
7549 return iemRaiseGeneralProtectionFault0(pVCpu);
7550 return VINF_SUCCESS;
7551 }
7552
7553 case IEMMODE_64BIT:
7554 {
7555 RTGCPTR GCPtrMem = *pGCPtrMem;
7556 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7557 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7558
7559 Assert(cbMem >= 1);
7560 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7561 return VINF_SUCCESS;
7562 return iemRaiseGeneralProtectionFault0(pVCpu);
7563 }
7564
7565 default:
7566 AssertFailedReturn(VERR_IEM_IPE_7);
7567 }
7568}
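/*
 * Worked example for the expand-down case handled above: with u32Limit set to
 * 0x00000fff and the B bit set, offsets 0x00000000..0x00000fff are invalid
 * while 0x00001000..0xffffffff are valid; with B clear the upper bound drops
 * to 0xffff.  This is what the "GCPtrFirst32 < u32Limit + 1" and
 * "GCPtrLast32 > (B ? UINT32_MAX : 0xffff)" checks express.
 */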
7569
7570
7571/**
7572 * Translates a virtual address to a physical address and checks if we
7573 * can access the page as specified.
7574 *
7575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7576 * @param GCPtrMem The virtual address.
7577 * @param fAccess The intended access.
7578 * @param pGCPhysMem Where to return the physical address.
7579 */
7580IEM_STATIC VBOXSTRICTRC
7581iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7582{
7583 /** @todo Need a different PGM interface here. We're currently using
7584 * generic / REM interfaces. this won't cut it for R0 & RC. */
7585 RTGCPHYS GCPhys;
7586 uint64_t fFlags;
7587 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7588 if (RT_FAILURE(rc))
7589 {
7590 /** @todo Check unassigned memory in unpaged mode. */
7591 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7592 *pGCPhysMem = NIL_RTGCPHYS;
7593 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7594 }
7595
7596 /* If the page is writable and does not have the no-exec bit set, all
7597 access is allowed. Otherwise we'll have to check more carefully... */
7598 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7599 {
7600 /* Write to read only memory? */
7601 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7602 && !(fFlags & X86_PTE_RW)
7603 && ( pVCpu->iem.s.uCpl != 0
7604 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7605 {
7606 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7607 *pGCPhysMem = NIL_RTGCPHYS;
7608 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7609 }
7610
7611 /* Kernel memory accessed by userland? */
7612 if ( !(fFlags & X86_PTE_US)
7613 && pVCpu->iem.s.uCpl == 3
7614 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7615 {
7616 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7617 *pGCPhysMem = NIL_RTGCPHYS;
7618 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7619 }
7620
7621 /* Executing non-executable memory? */
7622 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7623 && (fFlags & X86_PTE_PAE_NX)
7624 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7625 {
7626 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7627 *pGCPhysMem = NIL_RTGCPHYS;
7628 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7629 VERR_ACCESS_DENIED);
7630 }
7631 }
7632
7633 /*
7634 * Set the dirty / access flags.
7635 * ASSUMES this is set when the address is translated rather than on commit...
7636 */
7637 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7638 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7639 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7640 {
7641 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7642 AssertRC(rc2);
7643 }
7644
7645 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7646 *pGCPhysMem = GCPhys;
7647 return VINF_SUCCESS;
7648}
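/*
 * Summary of the checks above: a write to a read-only page faults unless CPL
 * is 0 and CR0.WP is clear; a CPL-3 access to a supervisor page faults unless
 * it is flagged as a system access; and an instruction fetch from a no-execute
 * page faults when EFER.NXE is set.  On success the accessed (and, for writes,
 * dirty) bits are set eagerly at translation time rather than at commit.
 */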
7649
7650
7651
7652/**
7653 * Maps a physical page.
7654 *
7655 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7657 * @param GCPhysMem The physical address.
7658 * @param fAccess The intended access.
7659 * @param ppvMem Where to return the mapping address.
7660 * @param pLock The PGM lock.
7661 */
7662IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7663{
7664#ifdef IEM_VERIFICATION_MODE_FULL
7665 /* Force the alternative path so we can ignore writes. */
7666 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7667 {
7668 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7669 {
7670 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7671 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7672 if (RT_FAILURE(rc2))
7673 pVCpu->iem.s.fProblematicMemory = true;
7674 }
7675 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7676 }
7677#endif
7678#ifdef IEM_LOG_MEMORY_WRITES
7679 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7680 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7681#endif
7682#ifdef IEM_VERIFICATION_MODE_MINIMAL
7683 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7684#endif
7685
7686 /** @todo This API may require some improving later. A private deal with PGM
7687 * regarding locking and unlocking needs to be struck. A couple of TLBs
7688 * living in PGM, but with publicly accessible inlined access methods
7689 * could perhaps be an even better solution. */
7690 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7691 GCPhysMem,
7692 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7693 pVCpu->iem.s.fBypassHandlers,
7694 ppvMem,
7695 pLock);
7696 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7697 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7698
7699#ifdef IEM_VERIFICATION_MODE_FULL
7700 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7701 pVCpu->iem.s.fProblematicMemory = true;
7702#endif
7703 return rc;
7704}
7705
7706
7707/**
7708 * Unmap a page previously mapped by iemMemPageMap.
7709 *
7710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7711 * @param GCPhysMem The physical address.
7712 * @param fAccess The intended access.
7713 * @param pvMem What iemMemPageMap returned.
7714 * @param pLock The PGM lock.
7715 */
7716DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7717{
7718 NOREF(pVCpu);
7719 NOREF(GCPhysMem);
7720 NOREF(fAccess);
7721 NOREF(pvMem);
7722 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7723}
7724
7725
7726/**
7727 * Looks up a memory mapping entry.
7728 *
7729 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7731 * @param pvMem The memory address.
7732 * @param fAccess The access flags of the mapping to look up.
7733 */
7734DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7735{
7736 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7737 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7738 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7739 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7740 return 0;
7741 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7742 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7743 return 1;
7744 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7745 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7746 return 2;
7747 return VERR_NOT_FOUND;
7748}
7749
7750
7751/**
7752 * Finds a free memmap entry when using iNextMapping doesn't work.
7753 *
7754 * @returns Memory mapping index, 1024 on failure.
7755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7756 */
7757IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7758{
7759 /*
7760 * The easy case.
7761 */
7762 if (pVCpu->iem.s.cActiveMappings == 0)
7763 {
7764 pVCpu->iem.s.iNextMapping = 1;
7765 return 0;
7766 }
7767
7768 /* There should be enough mappings for all instructions. */
7769 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7770
7771 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7772 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7773 return i;
7774
7775 AssertFailedReturn(1024);
7776}
7777
7778
7779/**
7780 * Commits a bounce buffer that needs writing back and unmaps it.
7781 *
7782 * @returns Strict VBox status code.
7783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7784 * @param iMemMap The index of the buffer to commit.
7785 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7786 * Always false in ring-3, obviously.
7787 */
7788IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7789{
7790 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7791 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7792#ifdef IN_RING3
7793 Assert(!fPostponeFail);
7794 RT_NOREF_PV(fPostponeFail);
7795#endif
7796
7797 /*
7798 * Do the writing.
7799 */
7800#ifndef IEM_VERIFICATION_MODE_MINIMAL
7801 PVM pVM = pVCpu->CTX_SUFF(pVM);
7802 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7803 && !IEM_VERIFICATION_ENABLED(pVCpu))
7804 {
7805 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7806 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7807 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7808 if (!pVCpu->iem.s.fBypassHandlers)
7809 {
7810 /*
7811 * Carefully and efficiently dealing with access handler return
7812 * codes make this a little bloated.
7813 */
7814 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7815 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7816 pbBuf,
7817 cbFirst,
7818 PGMACCESSORIGIN_IEM);
7819 if (rcStrict == VINF_SUCCESS)
7820 {
7821 if (cbSecond)
7822 {
7823 rcStrict = PGMPhysWrite(pVM,
7824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7825 pbBuf + cbFirst,
7826 cbSecond,
7827 PGMACCESSORIGIN_IEM);
7828 if (rcStrict == VINF_SUCCESS)
7829 { /* nothing */ }
7830 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7831 {
7832 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7833 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7835 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7836 }
7837# ifndef IN_RING3
7838 else if (fPostponeFail)
7839 {
7840 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7841 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7842 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7843 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7844 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7845 return iemSetPassUpStatus(pVCpu, rcStrict);
7846 }
7847# endif
7848 else
7849 {
7850 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7851 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7852 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7853 return rcStrict;
7854 }
7855 }
7856 }
7857 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7858 {
7859 if (!cbSecond)
7860 {
7861 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7863 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7864 }
7865 else
7866 {
7867 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7868 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7869 pbBuf + cbFirst,
7870 cbSecond,
7871 PGMACCESSORIGIN_IEM);
7872 if (rcStrict2 == VINF_SUCCESS)
7873 {
7874 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7875 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7876 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7877 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7878 }
7879 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7880 {
7881 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7882 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7884 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7885 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7886 }
7887# ifndef IN_RING3
7888 else if (fPostponeFail)
7889 {
7890 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7891 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7892 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7893 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7894 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7895 return iemSetPassUpStatus(pVCpu, rcStrict);
7896 }
7897# endif
7898 else
7899 {
7900 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7901 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7902 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7903 return rcStrict2;
7904 }
7905 }
7906 }
7907# ifndef IN_RING3
7908 else if (fPostponeFail)
7909 {
7910 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7912 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7913 if (!cbSecond)
7914 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7915 else
7916 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7917 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7918 return iemSetPassUpStatus(pVCpu, rcStrict);
7919 }
7920# endif
7921 else
7922 {
7923 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7924 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7925 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7926 return rcStrict;
7927 }
7928 }
7929 else
7930 {
7931 /*
7932 * No access handlers, much simpler.
7933 */
7934 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7935 if (RT_SUCCESS(rc))
7936 {
7937 if (cbSecond)
7938 {
7939 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7940 if (RT_SUCCESS(rc))
7941 { /* likely */ }
7942 else
7943 {
7944 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7945 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7946 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7947 return rc;
7948 }
7949 }
7950 }
7951 else
7952 {
7953 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7954 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7955 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7956 return rc;
7957 }
7958 }
7959 }
7960#endif
7961
7962#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7963 /*
7964 * Record the write(s).
7965 */
7966 if (!pVCpu->iem.s.fNoRem)
7967 {
7968 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7969 if (pEvtRec)
7970 {
7971 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7972 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7973 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7974 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7975 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7976 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7977 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7978 }
7979 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7980 {
7981 pEvtRec = iemVerifyAllocRecord(pVCpu);
7982 if (pEvtRec)
7983 {
7984 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7985 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7986 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7987 memcpy(pEvtRec->u.RamWrite.ab,
7988 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7989 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7990 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7991 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7992 }
7993 }
7994 }
7995#endif
7996#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
7997 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7998 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
7999 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8000 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8001 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8002 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8003
8004 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8005 g_cbIemWrote = cbWrote;
8006 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8007#endif
8008
8009 /*
8010 * Free the mapping entry.
8011 */
8012 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8013 Assert(pVCpu->iem.s.cActiveMappings != 0);
8014 pVCpu->iem.s.cActiveMappings--;
8015 return VINF_SUCCESS;
8016}
8017
8018
8019/**
8020 * iemMemMap worker that deals with a request crossing pages.
8021 */
8022IEM_STATIC VBOXSTRICTRC
8023iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8024{
8025 /*
8026 * Do the address translations.
8027 */
8028 RTGCPHYS GCPhysFirst;
8029 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8030 if (rcStrict != VINF_SUCCESS)
8031 return rcStrict;
8032
8033 RTGCPHYS GCPhysSecond;
8034 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8035 fAccess, &GCPhysSecond);
8036 if (rcStrict != VINF_SUCCESS)
8037 return rcStrict;
8038 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8039
8040 PVM pVM = pVCpu->CTX_SUFF(pVM);
8041#ifdef IEM_VERIFICATION_MODE_FULL
8042 /*
8043 * Detect problematic memory when verifying so we can select
8044 * the right execution engine. (TLB: Redo this.)
8045 */
8046 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8047 {
8048 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8049 if (RT_SUCCESS(rc2))
8050 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8051 if (RT_FAILURE(rc2))
8052 pVCpu->iem.s.fProblematicMemory = true;
8053 }
8054#endif
8055
8056
8057 /*
8058 * Read in the current memory content if it's a read, execute or partial
8059 * write access.
8060 */
8061 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8062 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8063 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8064
8065 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8066 {
8067 if (!pVCpu->iem.s.fBypassHandlers)
8068 {
8069 /*
8070 * Must carefully deal with access handler status codes here,
8071 * makes the code a bit bloated.
8072 */
8073 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8074 if (rcStrict == VINF_SUCCESS)
8075 {
8076 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8077 if (rcStrict == VINF_SUCCESS)
8078 { /*likely */ }
8079 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8080 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8081 else
8082 {
8083                   Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8084 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8085 return rcStrict;
8086 }
8087 }
8088 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8089 {
8090 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8091 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8092 {
8093 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8094 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8095 }
8096 else
8097 {
8098                   Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8099                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8100 return rcStrict2;
8101 }
8102 }
8103 else
8104 {
8105             Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8106 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8107 return rcStrict;
8108 }
8109 }
8110 else
8111 {
8112 /*
8113           * No informational status codes here, much more straightforward.
8114 */
8115 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8116 if (RT_SUCCESS(rc))
8117 {
8118 Assert(rc == VINF_SUCCESS);
8119 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8120 if (RT_SUCCESS(rc))
8121 Assert(rc == VINF_SUCCESS);
8122 else
8123 {
8124                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8125 return rc;
8126 }
8127 }
8128 else
8129 {
8130             Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8131 return rc;
8132 }
8133 }
8134
8135#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8136 if ( !pVCpu->iem.s.fNoRem
8137 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8138 {
8139 /*
8140 * Record the reads.
8141 */
8142 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8143 if (pEvtRec)
8144 {
8145 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8146 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8147 pEvtRec->u.RamRead.cb = cbFirstPage;
8148 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8149 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8150 }
8151 pEvtRec = iemVerifyAllocRecord(pVCpu);
8152 if (pEvtRec)
8153 {
8154 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8155 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8156 pEvtRec->u.RamRead.cb = cbSecondPage;
8157 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8158 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8159 }
8160 }
8161#endif
8162 }
8163#ifdef VBOX_STRICT
8164 else
8165 memset(pbBuf, 0xcc, cbMem);
8166 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8167 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8168#endif
8169
8170 /*
8171 * Commit the bounce buffer entry.
8172 */
8173 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8174 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8175 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8176 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8177 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8178 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8179 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8180 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8181 pVCpu->iem.s.cActiveMappings++;
8182
8183 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8184 *ppvMem = pbBuf;
8185 return VINF_SUCCESS;
8186}
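/*
 * A worked example of the page split above, using hypothetical numbers: for a
 * dword access (cbMem = 4) at a linear address with page offset 0xffe,
 *      cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffe = 2
 *      cbSecondPage = cbMem - cbFirstPage                          = 4 - 2          = 2
 * so the bounce buffer receives bytes 0..1 from the tail of the first page and
 * bytes 2..3 from the start of the second page (GCPhysSecond), and the commit
 * path writes them back in the same two pieces.
 */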
8187
8188
8189/**
8190 * iemMemMap worker that deals with iemMemPageMap failures.
8191 */
8192IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8193 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8194{
8195 /*
8196 * Filter out conditions we can handle and the ones which shouldn't happen.
8197 */
8198 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8199 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8200 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8201 {
8202 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8203 return rcMap;
8204 }
8205 pVCpu->iem.s.cPotentialExits++;
8206
8207 /*
8208 * Read in the current memory content if it's a read, execute or partial
8209 * write access.
8210 */
8211 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8212 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8213 {
8214 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8215 memset(pbBuf, 0xff, cbMem);
8216 else
8217 {
8218 int rc;
8219 if (!pVCpu->iem.s.fBypassHandlers)
8220 {
8221 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8222 if (rcStrict == VINF_SUCCESS)
8223 { /* nothing */ }
8224 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8225 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8226 else
8227 {
8228 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8229 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8230 return rcStrict;
8231 }
8232 }
8233 else
8234 {
8235 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8236 if (RT_SUCCESS(rc))
8237 { /* likely */ }
8238 else
8239 {
8240 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8241 GCPhysFirst, rc));
8242 return rc;
8243 }
8244 }
8245 }
8246
8247#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8248 if ( !pVCpu->iem.s.fNoRem
8249 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8250 {
8251 /*
8252 * Record the read.
8253 */
8254 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8255 if (pEvtRec)
8256 {
8257 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8258 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8259 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8260 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8261 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8262 }
8263 }
8264#endif
8265 }
8266#ifdef VBOX_STRICT
8267    else
8268        memset(pbBuf, 0xcc, cbMem);
8271    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8272        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8273#endif
8274
8275 /*
8276 * Commit the bounce buffer entry.
8277 */
8278 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8279 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8280 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8281 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8282 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8283 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8284 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8285 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8286 pVCpu->iem.s.cActiveMappings++;
8287
8288 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8289 *ppvMem = pbBuf;
8290 return VINF_SUCCESS;
8291}
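/*
 * Example of what this fallback produces (hypothetical request): a 4 byte read
 * that hits unassigned physical space (rcMap = VERR_PGM_PHYS_TLB_UNASSIGNED)
 * ends up with a bounce buffer of 0xff 0xff 0xff 0xff and fUnassigned = true,
 * while a read that hits an access handler (VERR_PGM_PHYS_TLB_CATCH_ALL) is
 * serviced through PGMPhysRead into the same buffer.
 */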
8292
8293
8294
8295/**
8296 * Maps the specified guest memory for the given kind of access.
8297 *
8298 * This may be using bounce buffering of the memory if it's crossing a page
8299 * boundary or if there is an access handler installed for any of it. Because
8300 * of lock prefix guarantees, we're in for some extra clutter when this
8301 * happens.
8302 *
8303 * This may raise a \#GP, \#SS, \#PF or \#AC.
8304 *
8305 * @returns VBox strict status code.
8306 *
8307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8308 * @param ppvMem Where to return the pointer to the mapped
8309 * memory.
8310 * @param cbMem The number of bytes to map. This is usually 1,
8311 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8312 * string operations it can be up to a page.
8313 * @param iSegReg The index of the segment register to use for
8314 * this access. The base and limits are checked.
8315 * Use UINT8_MAX to indicate that no segmentation
8316 * is required (for IDT, GDT and LDT accesses).
8317 * @param GCPtrMem The address of the guest memory.
8318 * @param fAccess How the memory is being accessed. The
8319 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8320 * how to map the memory, while the
8321 * IEM_ACCESS_WHAT_XXX bit is used when raising
8322 * exceptions.
8323 */
8324IEM_STATIC VBOXSTRICTRC
8325iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8326{
8327 /*
8328 * Check the input and figure out which mapping entry to use.
8329 */
8330 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8331 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8332 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8333
8334 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8335 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8336 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8337 {
8338 iMemMap = iemMemMapFindFree(pVCpu);
8339 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8340 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8341 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8342 pVCpu->iem.s.aMemMappings[2].fAccess),
8343 VERR_IEM_IPE_9);
8344 }
8345
8346 /*
8347 * Map the memory, checking that we can actually access it. If something
8348 * slightly complicated happens, fall back on bounce buffering.
8349 */
8350 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8351 if (rcStrict != VINF_SUCCESS)
8352 return rcStrict;
8353
8354 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8355 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8356
8357 RTGCPHYS GCPhysFirst;
8358 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8359 if (rcStrict != VINF_SUCCESS)
8360 return rcStrict;
8361
8362 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8363 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8364 if (fAccess & IEM_ACCESS_TYPE_READ)
8365 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8366
8367 void *pvMem;
8368 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8369 if (rcStrict != VINF_SUCCESS)
8370 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8371
8372 /*
8373 * Fill in the mapping table entry.
8374 */
8375 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8376 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8377 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8378 pVCpu->iem.s.cActiveMappings++;
8379
8380 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8381 *ppvMem = pvMem;
8382 return VINF_SUCCESS;
8383}
8384
8385
8386/**
8387 * Commits the guest memory if bounce buffered and unmaps it.
8388 *
8389 * @returns Strict VBox status code.
8390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8391 * @param pvMem The mapping.
8392 * @param fAccess The kind of access.
8393 */
8394IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8395{
8396 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8397 AssertReturn(iMemMap >= 0, iMemMap);
8398
8399 /* If it's bounce buffered, we may need to write back the buffer. */
8400 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8401 {
8402 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8403 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8404 }
8405 /* Otherwise unlock it. */
8406 else
8407 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8408
8409 /* Free the entry. */
8410 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8411 Assert(pVCpu->iem.s.cActiveMappings != 0);
8412 pVCpu->iem.s.cActiveMappings--;
8413 return VINF_SUCCESS;
8414}
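/*
 * A minimal usage sketch of the iemMemMap / iemMemCommitAndUnmap pair above,
 * mirroring the pattern of the fetch and store helpers further down.  This is
 * illustrative only (hence not compiled); the read-modify-write of a word is a
 * made up example.
 */
#if 0
static VBOXSTRICTRC iemMemMapUsageSketch(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* Map a word for read+write access, update it in place, then commit and unmap. */
    uint16_t    *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Dst += 1; /* the actual modification would go here */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif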
8415
8416#ifdef IEM_WITH_SETJMP
8417
8418/**
8419 * Maps the specified guest memory for the given kind of access, longjmp on
8420 * error.
8421 *
8422 * This may be using bounce buffering of the memory if it's crossing a page
8423 * boundary or if there is an access handler installed for any of it. Because
8424 * of lock prefix guarantees, we're in for some extra clutter when this
8425 * happens.
8426 *
8427 * This may raise a \#GP, \#SS, \#PF or \#AC.
8428 *
8429 * @returns Pointer to the mapped memory.
8430 *
8431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8432 * @param cbMem The number of bytes to map. This is usually 1,
8433 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8434 * string operations it can be up to a page.
8435 * @param iSegReg The index of the segment register to use for
8436 * this access. The base and limits are checked.
8437 * Use UINT8_MAX to indicate that no segmentation
8438 * is required (for IDT, GDT and LDT accesses).
8439 * @param GCPtrMem The address of the guest memory.
8440 * @param fAccess How the memory is being accessed. The
8441 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8442 * how to map the memory, while the
8443 * IEM_ACCESS_WHAT_XXX bit is used when raising
8444 * exceptions.
8445 */
8446IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8447{
8448 /*
8449 * Check the input and figure out which mapping entry to use.
8450 */
8451 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8452 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8453 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8454
8455 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8456 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8457 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8458 {
8459 iMemMap = iemMemMapFindFree(pVCpu);
8460 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8461 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8462 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8463 pVCpu->iem.s.aMemMappings[2].fAccess),
8464 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8465 }
8466
8467 /*
8468 * Map the memory, checking that we can actually access it. If something
8469 * slightly complicated happens, fall back on bounce buffering.
8470 */
8471 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8472 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8473 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8474
8475 /* Crossing a page boundary? */
8476 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8477 { /* No (likely). */ }
8478 else
8479 {
8480 void *pvMem;
8481 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8482 if (rcStrict == VINF_SUCCESS)
8483 return pvMem;
8484 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8485 }
8486
8487 RTGCPHYS GCPhysFirst;
8488 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8489 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8490 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8491
8492 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8493 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8494 if (fAccess & IEM_ACCESS_TYPE_READ)
8495 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8496
8497 void *pvMem;
8498 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8499 if (rcStrict == VINF_SUCCESS)
8500 { /* likely */ }
8501 else
8502 {
8503 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8504 if (rcStrict == VINF_SUCCESS)
8505 return pvMem;
8506 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8507 }
8508
8509 /*
8510 * Fill in the mapping table entry.
8511 */
8512 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8513 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8514 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8515 pVCpu->iem.s.cActiveMappings++;
8516
8517 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8518 return pvMem;
8519}
8520
8521
8522/**
8523 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8524 *
8525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8526 * @param pvMem The mapping.
8527 * @param fAccess The kind of access.
8528 */
8529IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8530{
8531 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8532 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8533
8534 /* If it's bounce buffered, we may need to write back the buffer. */
8535 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8536 {
8537 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8538 {
8539 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8540 if (rcStrict == VINF_SUCCESS)
8541 return;
8542 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8543 }
8544 }
8545 /* Otherwise unlock it. */
8546 else
8547 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8548
8549 /* Free the entry. */
8550 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8551 Assert(pVCpu->iem.s.cActiveMappings != 0);
8552 pVCpu->iem.s.cActiveMappings--;
8553}
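/*
 * The corresponding sketch for the setjmp/longjmp variants above (illustrative
 * only, not compiled); it mirrors iemMemFetchDataU16Jmp and friends further
 * down.  Note how there is no status code plumbing: any failure longjmps out.
 */
#if 0
static uint16_t iemMemMapJmpUsageSketch(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    uint16_t const  u16Ret  = *pu16Src;
    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
    return u16Ret;
}
#endif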
8554
8555#endif
8556
8557#ifndef IN_RING3
8558/**
8559 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8560 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
8561 *
8562 * Allows the instruction to be completed and retired, while the IEM user will
8563 * return to ring-3 immediately afterwards and do the postponed writes there.
8564 *
8565 * @returns VBox status code (no strict statuses). Caller must check
8566 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8568 * @param pvMem The mapping.
8569 * @param fAccess The kind of access.
8570 */
8571IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8572{
8573 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8574 AssertReturn(iMemMap >= 0, iMemMap);
8575
8576 /* If it's bounce buffered, we may need to write back the buffer. */
8577 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8578 {
8579 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8580 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8581 }
8582 /* Otherwise unlock it. */
8583 else
8584 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8585
8586 /* Free the entry. */
8587 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8588 Assert(pVCpu->iem.s.cActiveMappings != 0);
8589 pVCpu->iem.s.cActiveMappings--;
8590 return VINF_SUCCESS;
8591}
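/*
 * Hypothetical caller sketch (not compiled, names made up) showing how the
 * postpone-to-ring-3 contract above is meant to be honoured: after a successful
 * commit the caller checks VMCPU_FF_IEM and bails out to ring-3 before
 * repeating a string instruction, so the postponed write(s) get flushed there.
 */
# if 0
static VBOXSTRICTRC iemPostponedWriteCallerSketch(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
{
    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, fAccess);
    if (   rcStrict == VINF_SUCCESS
        && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = VINF_EM_RAW_TO_R3; /* let ring-3 do the postponed writes before continuing */
    return rcStrict;
}
# endif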
8592#endif
8593
8594
8595/**
8596 * Rolls back mappings, releasing page locks and such.
8597 *
8598 * The caller shall only call this after checking cActiveMappings.
8599 *
8601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8602 */
8603IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8604{
8605 Assert(pVCpu->iem.s.cActiveMappings > 0);
8606
8607 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8608 while (iMemMap-- > 0)
8609 {
8610 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8611 if (fAccess != IEM_ACCESS_INVALID)
8612 {
8613 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8614 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8615 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8616 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8617 Assert(pVCpu->iem.s.cActiveMappings > 0);
8618 pVCpu->iem.s.cActiveMappings--;
8619 }
8620 }
8621}
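/*
 * Sketch of the caller contract spelled out above (illustrative only, not an
 * actual call site): rollback is only attempted after checking cActiveMappings.
 */
#if 0
static void iemMemRollbackCallerSketch(PVMCPU pVCpu, VBOXSTRICTRC rcStrictFromInstruction)
{
    if (   rcStrictFromInstruction != VINF_SUCCESS
        && pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);
}
#endif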
8622
8623
8624/**
8625 * Fetches a data byte.
8626 *
8627 * @returns Strict VBox status code.
8628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8629 * @param pu8Dst Where to return the byte.
8630 * @param iSegReg The index of the segment register to use for
8631 * this access. The base and limits are checked.
8632 * @param GCPtrMem The address of the guest memory.
8633 */
8634IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8635{
8636 /* The lazy approach for now... */
8637 uint8_t const *pu8Src;
8638 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8639 if (rc == VINF_SUCCESS)
8640 {
8641 *pu8Dst = *pu8Src;
8642 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8643 }
8644 return rc;
8645}
8646
8647
8648#ifdef IEM_WITH_SETJMP
8649/**
8650 * Fetches a data byte, longjmp on error.
8651 *
8652 * @returns The byte.
8653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8654 * @param iSegReg The index of the segment register to use for
8655 * this access. The base and limits are checked.
8656 * @param GCPtrMem The address of the guest memory.
8657 */
8658DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8659{
8660 /* The lazy approach for now... */
8661 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8662 uint8_t const bRet = *pu8Src;
8663 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8664 return bRet;
8665}
8666#endif /* IEM_WITH_SETJMP */
8667
8668
8669/**
8670 * Fetches a data word.
8671 *
8672 * @returns Strict VBox status code.
8673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8674 * @param pu16Dst Where to return the word.
8675 * @param iSegReg The index of the segment register to use for
8676 * this access. The base and limits are checked.
8677 * @param GCPtrMem The address of the guest memory.
8678 */
8679IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8680{
8681 /* The lazy approach for now... */
8682 uint16_t const *pu16Src;
8683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8684 if (rc == VINF_SUCCESS)
8685 {
8686 *pu16Dst = *pu16Src;
8687 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8688 }
8689 return rc;
8690}
8691
8692
8693#ifdef IEM_WITH_SETJMP
8694/**
8695 * Fetches a data word, longjmp on error.
8696 *
8697 * @returns The word
8698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8699 * @param iSegReg The index of the segment register to use for
8700 * this access. The base and limits are checked.
8701 * @param GCPtrMem The address of the guest memory.
8702 */
8703DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8704{
8705 /* The lazy approach for now... */
8706 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8707 uint16_t const u16Ret = *pu16Src;
8708 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8709 return u16Ret;
8710}
8711#endif
8712
8713
8714/**
8715 * Fetches a data dword.
8716 *
8717 * @returns Strict VBox status code.
8718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8719 * @param pu32Dst Where to return the dword.
8720 * @param iSegReg The index of the segment register to use for
8721 * this access. The base and limits are checked.
8722 * @param GCPtrMem The address of the guest memory.
8723 */
8724IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8725{
8726 /* The lazy approach for now... */
8727 uint32_t const *pu32Src;
8728 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8729 if (rc == VINF_SUCCESS)
8730 {
8731 *pu32Dst = *pu32Src;
8732 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8733 }
8734 return rc;
8735}
8736
8737
8738#ifdef IEM_WITH_SETJMP
8739
8740IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8741{
8742 Assert(cbMem >= 1);
8743 Assert(iSegReg < X86_SREG_COUNT);
8744
8745 /*
8746 * 64-bit mode is simpler.
8747 */
8748 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8749 {
8750 if (iSegReg >= X86_SREG_FS)
8751 {
8752 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8753 GCPtrMem += pSel->u64Base;
8754 }
8755
8756 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8757 return GCPtrMem;
8758 }
8759 /*
8760 * 16-bit and 32-bit segmentation.
8761 */
8762 else
8763 {
8764 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8765 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8766 == X86DESCATTR_P /* data, expand up */
8767 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8768 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8769 {
8770 /* expand up */
8771 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8772 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8773 && GCPtrLast32 > (uint32_t)GCPtrMem))
8774 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8775 }
8776 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8777 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8778 {
8779 /* expand down */
8780 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8781 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8782 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8783 && GCPtrLast32 > (uint32_t)GCPtrMem))
8784 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8785 }
8786 else
8787 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8788 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8789 }
8790 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8791}
8792
8793
8794IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8795{
8796 Assert(cbMem >= 1);
8797 Assert(iSegReg < X86_SREG_COUNT);
8798
8799 /*
8800 * 64-bit mode is simpler.
8801 */
8802 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8803 {
8804 if (iSegReg >= X86_SREG_FS)
8805 {
8806 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8807 GCPtrMem += pSel->u64Base;
8808 }
8809
8810 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8811 return GCPtrMem;
8812 }
8813 /*
8814 * 16-bit and 32-bit segmentation.
8815 */
8816 else
8817 {
8818 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8819 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8820 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8821 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8822 {
8823 /* expand up */
8824 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8825 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8826 && GCPtrLast32 > (uint32_t)GCPtrMem))
8827 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8828 }
8829        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8830 {
8831 /* expand down */
8832 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8833 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8834 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8835 && GCPtrLast32 > (uint32_t)GCPtrMem))
8836 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8837 }
8838 else
8839 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8840 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8841 }
8842 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8843}
8844
8845
8846/**
8847 * Fetches a data dword, longjmp on error, fallback/safe version.
8848 *
8849 * @returns The dword
8850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8851 * @param iSegReg The index of the segment register to use for
8852 * this access. The base and limits are checked.
8853 * @param GCPtrMem The address of the guest memory.
8854 */
8855IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8856{
8857 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8858 uint32_t const u32Ret = *pu32Src;
8859 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8860 return u32Ret;
8861}
8862
8863
8864/**
8865 * Fetches a data dword, longjmp on error.
8866 *
8867 * @returns The dword
8868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8869 * @param iSegReg The index of the segment register to use for
8870 * this access. The base and limits are checked.
8871 * @param GCPtrMem The address of the guest memory.
8872 */
8873DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8874{
8875# ifdef IEM_WITH_DATA_TLB
8876 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8877 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8878 {
8879 /// @todo more later.
8880 }
8881
8882 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8883# else
8884 /* The lazy approach. */
8885 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8886 uint32_t const u32Ret = *pu32Src;
8887 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8888 return u32Ret;
8889# endif
8890}
8891#endif
8892
8893
8894#ifdef SOME_UNUSED_FUNCTION
8895/**
8896 * Fetches a data dword and sign extends it to a qword.
8897 *
8898 * @returns Strict VBox status code.
8899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8900 * @param pu64Dst Where to return the sign extended value.
8901 * @param iSegReg The index of the segment register to use for
8902 * this access. The base and limits are checked.
8903 * @param GCPtrMem The address of the guest memory.
8904 */
8905IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8906{
8907 /* The lazy approach for now... */
8908 int32_t const *pi32Src;
8909 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8910 if (rc == VINF_SUCCESS)
8911 {
8912 *pu64Dst = *pi32Src;
8913 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8914 }
8915#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8916 else
8917 *pu64Dst = 0;
8918#endif
8919 return rc;
8920}
8921#endif
8922
8923
8924/**
8925 * Fetches a data qword.
8926 *
8927 * @returns Strict VBox status code.
8928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8929 * @param pu64Dst Where to return the qword.
8930 * @param iSegReg The index of the segment register to use for
8931 * this access. The base and limits are checked.
8932 * @param GCPtrMem The address of the guest memory.
8933 */
8934IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8935{
8936 /* The lazy approach for now... */
8937 uint64_t const *pu64Src;
8938 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8939 if (rc == VINF_SUCCESS)
8940 {
8941 *pu64Dst = *pu64Src;
8942 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8943 }
8944 return rc;
8945}
8946
8947
8948#ifdef IEM_WITH_SETJMP
8949/**
8950 * Fetches a data qword, longjmp on error.
8951 *
8952 * @returns The qword.
8953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8954 * @param iSegReg The index of the segment register to use for
8955 * this access. The base and limits are checked.
8956 * @param GCPtrMem The address of the guest memory.
8957 */
8958DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8959{
8960 /* The lazy approach for now... */
8961 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8962 uint64_t const u64Ret = *pu64Src;
8963 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8964 return u64Ret;
8965}
8966#endif
8967
8968
8969/**
8970 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8971 *
8972 * @returns Strict VBox status code.
8973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8974 * @param pu64Dst Where to return the qword.
8975 * @param iSegReg The index of the segment register to use for
8976 * this access. The base and limits are checked.
8977 * @param GCPtrMem The address of the guest memory.
8978 */
8979IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8980{
8981 /* The lazy approach for now... */
8982 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8983 if (RT_UNLIKELY(GCPtrMem & 15))
8984 return iemRaiseGeneralProtectionFault0(pVCpu);
8985
8986 uint64_t const *pu64Src;
8987 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8988 if (rc == VINF_SUCCESS)
8989 {
8990 *pu64Dst = *pu64Src;
8991 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8992 }
8993 return rc;
8994}
8995
8996
8997#ifdef IEM_WITH_SETJMP
8998/**
8999 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9000 *
9001 * @returns The qword.
9002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9003 * @param iSegReg The index of the segment register to use for
9004 * this access. The base and limits are checked.
9005 * @param GCPtrMem The address of the guest memory.
9006 */
9007DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9008{
9009 /* The lazy approach for now... */
9010 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9011 if (RT_LIKELY(!(GCPtrMem & 15)))
9012 {
9013 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9014 uint64_t const u64Ret = *pu64Src;
9015 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9016 return u64Ret;
9017 }
9018
9019 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9020 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9021}
9022#endif
9023
9024
9025/**
9026 * Fetches a data tword.
9027 *
9028 * @returns Strict VBox status code.
9029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9030 * @param pr80Dst Where to return the tword.
9031 * @param iSegReg The index of the segment register to use for
9032 * this access. The base and limits are checked.
9033 * @param GCPtrMem The address of the guest memory.
9034 */
9035IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9036{
9037 /* The lazy approach for now... */
9038 PCRTFLOAT80U pr80Src;
9039 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9040 if (rc == VINF_SUCCESS)
9041 {
9042 *pr80Dst = *pr80Src;
9043 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9044 }
9045 return rc;
9046}
9047
9048
9049#ifdef IEM_WITH_SETJMP
9050/**
9051 * Fetches a data tword, longjmp on error.
9052 *
9053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9054 * @param pr80Dst Where to return the tword.
9055 * @param iSegReg The index of the segment register to use for
9056 * this access. The base and limits are checked.
9057 * @param GCPtrMem The address of the guest memory.
9058 */
9059DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9060{
9061 /* The lazy approach for now... */
9062 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9063 *pr80Dst = *pr80Src;
9064 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9065}
9066#endif
9067
9068
9069/**
9070 * Fetches a data dqword (double qword), generally SSE related.
9071 *
9072 * @returns Strict VBox status code.
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param   pu128Dst            Where to return the dqword.
9075 * @param iSegReg The index of the segment register to use for
9076 * this access. The base and limits are checked.
9077 * @param GCPtrMem The address of the guest memory.
9078 */
9079IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9080{
9081 /* The lazy approach for now... */
9082 uint128_t const *pu128Src;
9083 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9084 if (rc == VINF_SUCCESS)
9085 {
9086 *pu128Dst = *pu128Src;
9087 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9088 }
9089 return rc;
9090}
9091
9092
9093#ifdef IEM_WITH_SETJMP
9094/**
9095 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9096 *
9097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9098 * @param   pu128Dst            Where to return the dqword.
9099 * @param iSegReg The index of the segment register to use for
9100 * this access. The base and limits are checked.
9101 * @param GCPtrMem The address of the guest memory.
9102 */
9103IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9104{
9105 /* The lazy approach for now... */
9106 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9107 *pu128Dst = *pu128Src;
9108 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9109}
9110#endif
9111
9112
9113/**
9114 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9115 * related.
9116 *
9117 * Raises \#GP(0) if not aligned.
9118 *
9119 * @returns Strict VBox status code.
9120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9121 * @param   pu128Dst            Where to return the dqword.
9122 * @param iSegReg The index of the segment register to use for
9123 * this access. The base and limits are checked.
9124 * @param GCPtrMem The address of the guest memory.
9125 */
9126IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9127{
9128 /* The lazy approach for now... */
9129 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9130 if ( (GCPtrMem & 15)
9131 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9132 return iemRaiseGeneralProtectionFault0(pVCpu);
9133
9134 uint128_t const *pu128Src;
9135 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9136 if (rc == VINF_SUCCESS)
9137 {
9138 *pu128Dst = *pu128Src;
9139 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9140 }
9141 return rc;
9142}
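/*
 * Alignment example for the SSE-aligned accessor above (hypothetical addresses):
 *      GCPtrMem = 0x1008  ->  (GCPtrMem & 15) = 8  ->  #GP(0), unless MXCSR.MM is set.
 *      GCPtrMem = 0x1010  ->  (GCPtrMem & 15) = 0  ->  mapped and fetched normally.
 * Whether the check belongs before or after adding the segment base is still an
 * open @todo above.
 */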
9143
9144
9145#ifdef IEM_WITH_SETJMP
9146/**
9147 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9148 * related, longjmp on error.
9149 *
9150 * Raises \#GP(0) if not aligned.
9151 *
9152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9153 * @param   pu128Dst            Where to return the dqword.
9154 * @param iSegReg The index of the segment register to use for
9155 * this access. The base and limits are checked.
9156 * @param GCPtrMem The address of the guest memory.
9157 */
9158DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9159{
9160 /* The lazy approach for now... */
9161 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9162 if ( (GCPtrMem & 15) == 0
9163 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9164 {
9165 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9166 IEM_ACCESS_DATA_R);
9167 *pu128Dst = *pu128Src;
9168 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9169 return;
9170 }
9171
9172 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9173 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9174}
9175#endif
9176
9177
9178
9179/**
9180 * Fetches a descriptor register (lgdt, lidt).
9181 *
9182 * @returns Strict VBox status code.
9183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9184 * @param pcbLimit Where to return the limit.
9185 * @param pGCPtrBase Where to return the base.
9186 * @param iSegReg The index of the segment register to use for
9187 * this access. The base and limits are checked.
9188 * @param GCPtrMem The address of the guest memory.
9189 * @param enmOpSize The effective operand size.
9190 */
9191IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9192 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9193{
9194 /*
9195 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9196 * little special:
9197 * - The two reads are done separately.
9198 *    - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9199 * - We suspect the 386 to actually commit the limit before the base in
9200 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9201 *      don't try to emulate this eccentric behavior, because it's not well
9202 * enough understood and rather hard to trigger.
9203 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9204 */
9205 VBOXSTRICTRC rcStrict;
9206 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9207 {
9208 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9209 if (rcStrict == VINF_SUCCESS)
9210 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9211 }
9212 else
9213 {
9214        uint32_t uTmp = 0; /* (Visual C++ may warn that it could be used uninitialized.) */
9215 if (enmOpSize == IEMMODE_32BIT)
9216 {
9217 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9218 {
9219 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9220 if (rcStrict == VINF_SUCCESS)
9221 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9222 }
9223 else
9224 {
9225 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9226 if (rcStrict == VINF_SUCCESS)
9227 {
9228 *pcbLimit = (uint16_t)uTmp;
9229 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9230 }
9231 }
9232 if (rcStrict == VINF_SUCCESS)
9233 *pGCPtrBase = uTmp;
9234 }
9235 else
9236 {
9237 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9238 if (rcStrict == VINF_SUCCESS)
9239 {
9240 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9241 if (rcStrict == VINF_SUCCESS)
9242 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9243 }
9244 }
9245 }
9246 return rcStrict;
9247}
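/*
 * Memory layout consumed by the function above, e.g. for LGDT [mem] (worked
 * example, offsets relative to GCPtrMem):
 *      +0: 16-bit limit
 *      +2: base address - 64 bits in long mode, otherwise 32 bits, of which
 *          only the low 24 bits are kept when the operand size is 16-bit.
 */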
9248
9249
9250
9251/**
9252 * Stores a data byte.
9253 *
9254 * @returns Strict VBox status code.
9255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9256 * @param iSegReg The index of the segment register to use for
9257 * this access. The base and limits are checked.
9258 * @param GCPtrMem The address of the guest memory.
9259 * @param u8Value The value to store.
9260 */
9261IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9262{
9263 /* The lazy approach for now... */
9264 uint8_t *pu8Dst;
9265 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9266 if (rc == VINF_SUCCESS)
9267 {
9268 *pu8Dst = u8Value;
9269 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9270 }
9271 return rc;
9272}
9273
9274
9275#ifdef IEM_WITH_SETJMP
9276/**
9277 * Stores a data byte, longjmp on error.
9278 *
9279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9280 * @param iSegReg The index of the segment register to use for
9281 * this access. The base and limits are checked.
9282 * @param GCPtrMem The address of the guest memory.
9283 * @param u8Value The value to store.
9284 */
9285IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9286{
9287 /* The lazy approach for now... */
9288 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9289 *pu8Dst = u8Value;
9290 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9291}
9292#endif
9293
9294
9295/**
9296 * Stores a data word.
9297 *
9298 * @returns Strict VBox status code.
9299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9300 * @param iSegReg The index of the segment register to use for
9301 * this access. The base and limits are checked.
9302 * @param GCPtrMem The address of the guest memory.
9303 * @param u16Value The value to store.
9304 */
9305IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9306{
9307 /* The lazy approach for now... */
9308 uint16_t *pu16Dst;
9309 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9310 if (rc == VINF_SUCCESS)
9311 {
9312 *pu16Dst = u16Value;
9313 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9314 }
9315 return rc;
9316}
9317
9318
9319#ifdef IEM_WITH_SETJMP
9320/**
9321 * Stores a data word, longjmp on error.
9322 *
9323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9324 * @param iSegReg The index of the segment register to use for
9325 * this access. The base and limits are checked.
9326 * @param GCPtrMem The address of the guest memory.
9327 * @param u16Value The value to store.
9328 */
9329IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9330{
9331 /* The lazy approach for now... */
9332 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9333 *pu16Dst = u16Value;
9334 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9335}
9336#endif
9337
9338
9339/**
9340 * Stores a data dword.
9341 *
9342 * @returns Strict VBox status code.
9343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9344 * @param iSegReg The index of the segment register to use for
9345 * this access. The base and limits are checked.
9346 * @param GCPtrMem The address of the guest memory.
9347 * @param u32Value The value to store.
9348 */
9349IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9350{
9351 /* The lazy approach for now... */
9352 uint32_t *pu32Dst;
9353 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9354 if (rc == VINF_SUCCESS)
9355 {
9356 *pu32Dst = u32Value;
9357 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9358 }
9359 return rc;
9360}
9361
9362
9363#ifdef IEM_WITH_SETJMP
9364/**
9365 * Stores a data dword, longjmp on error.
9366 *
9368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9369 * @param iSegReg The index of the segment register to use for
9370 * this access. The base and limits are checked.
9371 * @param GCPtrMem The address of the guest memory.
9372 * @param u32Value The value to store.
9373 */
9374IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9375{
9376 /* The lazy approach for now... */
9377 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9378 *pu32Dst = u32Value;
9379 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9380}
9381#endif
9382
9383
9384/**
9385 * Stores a data qword.
9386 *
9387 * @returns Strict VBox status code.
9388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9389 * @param iSegReg The index of the segment register to use for
9390 * this access. The base and limits are checked.
9391 * @param GCPtrMem The address of the guest memory.
9392 * @param u64Value The value to store.
9393 */
9394IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9395{
9396 /* The lazy approach for now... */
9397 uint64_t *pu64Dst;
9398 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9399 if (rc == VINF_SUCCESS)
9400 {
9401 *pu64Dst = u64Value;
9402 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9403 }
9404 return rc;
9405}
9406
9407
9408#ifdef IEM_WITH_SETJMP
9409/**
9410 * Stores a data qword, longjmp on error.
9411 *
9412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9413 * @param iSegReg The index of the segment register to use for
9414 * this access. The base and limits are checked.
9415 * @param GCPtrMem The address of the guest memory.
9416 * @param u64Value The value to store.
9417 */
9418IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9419{
9420 /* The lazy approach for now... */
9421 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9422 *pu64Dst = u64Value;
9423 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9424}
9425#endif
9426
9427
9428/**
9429 * Stores a data dqword.
9430 *
9431 * @returns Strict VBox status code.
9432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9433 * @param iSegReg The index of the segment register to use for
9434 * this access. The base and limits are checked.
9435 * @param GCPtrMem The address of the guest memory.
9436 * @param u128Value The value to store.
9437 */
9438IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9439{
9440 /* The lazy approach for now... */
9441 uint128_t *pu128Dst;
9442 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9443 if (rc == VINF_SUCCESS)
9444 {
9445 *pu128Dst = u128Value;
9446 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9447 }
9448 return rc;
9449}
9450
9451
9452#ifdef IEM_WITH_SETJMP
9453/**
9454 * Stores a data dqword, longjmp on error.
9455 *
9456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9457 * @param iSegReg The index of the segment register to use for
9458 * this access. The base and limits are checked.
9459 * @param GCPtrMem The address of the guest memory.
9460 * @param u128Value The value to store.
9461 */
9462IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9463{
9464 /* The lazy approach for now... */
9465 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9466 *pu128Dst = u128Value;
9467 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9468}
9469#endif
9470
9471
9472/**
9473 * Stores a data dqword, SSE aligned.
9474 *
9475 * @returns Strict VBox status code.
9476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9477 * @param iSegReg The index of the segment register to use for
9478 * this access. The base and limits are checked.
9479 * @param GCPtrMem The address of the guest memory.
9480 * @param u128Value The value to store.
9481 */
9482IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9483{
9484 /* The lazy approach for now... */
9485 if ( (GCPtrMem & 15)
9486 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9487 return iemRaiseGeneralProtectionFault0(pVCpu);
9488
9489 uint128_t *pu128Dst;
9490 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9491 if (rc == VINF_SUCCESS)
9492 {
9493 *pu128Dst = u128Value;
9494 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9495 }
9496 return rc;
9497}
9498
9499
9500#ifdef IEM_WITH_SETJMP
9501/**
9502 * Stores a data dqword, SSE aligned, longjmp on error.
9503 *
9505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9506 * @param iSegReg The index of the segment register to use for
9507 * this access. The base and limits are checked.
9508 * @param GCPtrMem The address of the guest memory.
9509 * @param u128Value The value to store.
9510 */
9511DECL_NO_INLINE(IEM_STATIC, void)
9512iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9513{
9514 /* The lazy approach for now... */
9515 if ( (GCPtrMem & 15) == 0
9516 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9517 {
9518 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9519 *pu128Dst = u128Value;
9520 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9521 return;
9522 }
9523
9524 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9525 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9526}
9527#endif
9528
9529
9530/**
9531 * Stores a descriptor register (sgdt, sidt).
9532 *
9533 * @returns Strict VBox status code.
9534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9535 * @param cbLimit The limit.
9536 * @param GCPtrBase The base address.
9537 * @param iSegReg The index of the segment register to use for
9538 * this access. The base and limits are checked.
9539 * @param GCPtrMem The address of the guest memory.
9540 */
9541IEM_STATIC VBOXSTRICTRC
9542iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9543{
9544 /*
9545 * The SIDT and SGDT instructions actually store the data using two
9546 * independent writes. The instructions do not respond to opsize prefixes.
9547 */
9548 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9549 if (rcStrict == VINF_SUCCESS)
9550 {
9551 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9552 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9553 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9554 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9555 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9556 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9557 else
9558 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9559 }
9560 return rcStrict;
9561}
9562
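
/* A minimal usage sketch (illustrative only): an SGDT-style caller would pass
   the guest GDTR, with iSegReg and GCPtrEffDst assumed to come from the
   instruction decoder.

    PCPUMCTX     pCtx     = IEM_GET_CTX(pVCpu);
    VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iSegReg, GCPtrEffDst);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
*/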
9563
9564/**
9565 * Pushes a word onto the stack.
9566 *
9567 * @returns Strict VBox status code.
9568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9569 * @param u16Value The value to push.
9570 */
9571IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9572{
9573 /* Decrement the stack pointer. */
9574 uint64_t uNewRsp;
9575 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9576 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9577
9578 /* Write the word the lazy way. */
9579 uint16_t *pu16Dst;
9580 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9581 if (rc == VINF_SUCCESS)
9582 {
9583 *pu16Dst = u16Value;
9584 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9585 }
9586
9587 /* Commit the new RSP value unless an access handler made trouble. */
9588 if (rc == VINF_SUCCESS)
9589 pCtx->rsp = uNewRsp;
9590
9591 return rc;
9592}
9593
9594
9595/**
9596 * Pushes a dword onto the stack.
9597 *
9598 * @returns Strict VBox status code.
9599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9600 * @param u32Value The value to push.
9601 */
9602IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9603{
9604 /* Decrement the stack pointer. */
9605 uint64_t uNewRsp;
9606 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9607 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9608
9609 /* Write the dword the lazy way. */
9610 uint32_t *pu32Dst;
9611 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9612 if (rc == VINF_SUCCESS)
9613 {
9614 *pu32Dst = u32Value;
9615 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9616 }
9617
9618 /* Commit the new RSP value unless an access handler made trouble. */
9619 if (rc == VINF_SUCCESS)
9620 pCtx->rsp = uNewRsp;
9621
9622 return rc;
9623}
9624
9625
9626/**
9627 * Pushes a dword segment register value onto the stack.
9628 *
9629 * @returns Strict VBox status code.
9630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9631 * @param u32Value The value to push.
9632 */
9633IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9634{
9635 /* Decrement the stack pointer. */
9636 uint64_t uNewRsp;
9637 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9638 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9639
9640 VBOXSTRICTRC rc;
9641 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9642 {
9643 /* The recompiler writes a full dword. */
9644 uint32_t *pu32Dst;
9645 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9646 if (rc == VINF_SUCCESS)
9647 {
9648 *pu32Dst = u32Value;
9649 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9650 }
9651 }
9652 else
9653 {
9654 /* The intel docs talk about zero extending the selector register
9655 value. My actual intel CPU here might be zero extending the value
9656 but it still only writes the lower word... */
9657 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9658 * happens when crossing an electric page boundary, is the high word checked
9659 * for write accessibility or not? Probably it is. What about segment limits?
9660 * It appears this behavior is also shared with trap error codes.
9661 *
9662 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
9663 * ancient hardware when it actually did change. */
9664 uint16_t *pu16Dst;
9665 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9666 if (rc == VINF_SUCCESS)
9667 {
9668 *pu16Dst = (uint16_t)u32Value;
9669 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9670 }
9671 }
9672
9673 /* Commit the new RSP value unless an access handler made trouble. */
9674 if (rc == VINF_SUCCESS)
9675 pCtx->rsp = uNewRsp;
9676
9677 return rc;
9678}
9679
9680
9681/**
9682 * Pushes a qword onto the stack.
9683 *
9684 * @returns Strict VBox status code.
9685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9686 * @param u64Value The value to push.
9687 */
9688IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9689{
9690 /* Decrement the stack pointer. */
9691 uint64_t uNewRsp;
9692 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9693 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9694
9695 /* Write the qword the lazy way. */
9696 uint64_t *pu64Dst;
9697 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9698 if (rc == VINF_SUCCESS)
9699 {
9700 *pu64Dst = u64Value;
9701 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9702 }
9703
9704 /* Commit the new RSP value unless an access handler made trouble. */
9705 if (rc == VINF_SUCCESS)
9706 pCtx->rsp = uNewRsp;
9707
9708 return rc;
9709}
9710
9711
9712/**
9713 * Pops a word from the stack.
9714 *
9715 * @returns Strict VBox status code.
9716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9717 * @param pu16Value Where to store the popped value.
9718 */
9719IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9720{
9721 /* Increment the stack pointer. */
9722 uint64_t uNewRsp;
9723 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9724 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9725
9726 /* Read the word the lazy way. */
9727 uint16_t const *pu16Src;
9728 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9729 if (rc == VINF_SUCCESS)
9730 {
9731 *pu16Value = *pu16Src;
9732 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9733
9734 /* Commit the new RSP value. */
9735 if (rc == VINF_SUCCESS)
9736 pCtx->rsp = uNewRsp;
9737 }
9738
9739 return rc;
9740}
9741
9742
9743/**
9744 * Pops a dword from the stack.
9745 *
9746 * @returns Strict VBox status code.
9747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9748 * @param pu32Value Where to store the popped value.
9749 */
9750IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9751{
9752 /* Increment the stack pointer. */
9753 uint64_t uNewRsp;
9754 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9755 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9756
9757 /* Read the dword the lazy way. */
9758 uint32_t const *pu32Src;
9759 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9760 if (rc == VINF_SUCCESS)
9761 {
9762 *pu32Value = *pu32Src;
9763 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9764
9765 /* Commit the new RSP value. */
9766 if (rc == VINF_SUCCESS)
9767 pCtx->rsp = uNewRsp;
9768 }
9769
9770 return rc;
9771}
9772
9773
9774/**
9775 * Pops a qword from the stack.
9776 *
9777 * @returns Strict VBox status code.
9778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9779 * @param pu64Value Where to store the popped value.
9780 */
9781IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9782{
9783 /* Increment the stack pointer. */
9784 uint64_t uNewRsp;
9785 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9786 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9787
9788 /* Read the qword the lazy way. */
9789 uint64_t const *pu64Src;
9790 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9791 if (rc == VINF_SUCCESS)
9792 {
9793 *pu64Value = *pu64Src;
9794 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9795
9796 /* Commit the new RSP value. */
9797 if (rc == VINF_SUCCESS)
9798 pCtx->rsp = uNewRsp;
9799 }
9800
9801 return rc;
9802}
9803
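/* A minimal usage sketch (illustrative only): a near-RET style helper pops the
   return address and forwards any strict status (#SS, #PF, access handler
   statuses); the pop helper itself only commits the new RSP when the read
   succeeded.

    uint32_t uNewEip;
    VBOXSTRICTRC rcStrict = iemMemStackPopU32(pVCpu, &uNewEip);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    // ... validate uNewEip against the CS limit and update RIP here ...
*/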
9804
9805/**
9806 * Pushes a word onto the stack, using a temporary stack pointer.
9807 *
9808 * @returns Strict VBox status code.
9809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9810 * @param u16Value The value to push.
9811 * @param pTmpRsp Pointer to the temporary stack pointer.
9812 */
9813IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9814{
9815 /* Decrement the stack pointer. */
9816 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9817 RTUINT64U NewRsp = *pTmpRsp;
9818 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9819
9820 /* Write the word the lazy way. */
9821 uint16_t *pu16Dst;
9822 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9823 if (rc == VINF_SUCCESS)
9824 {
9825 *pu16Dst = u16Value;
9826 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9827 }
9828
9829 /* Commit the new RSP value unless an access handler made trouble. */
9830 if (rc == VINF_SUCCESS)
9831 *pTmpRsp = NewRsp;
9832
9833 return rc;
9834}
9835
9836
9837/**
9838 * Pushes a dword onto the stack, using a temporary stack pointer.
9839 *
9840 * @returns Strict VBox status code.
9841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9842 * @param u32Value The value to push.
9843 * @param pTmpRsp Pointer to the temporary stack pointer.
9844 */
9845IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9846{
9847 /* Decrement the stack pointer. */
9848 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9849 RTUINT64U NewRsp = *pTmpRsp;
9850 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9851
9852 /* Write the dword the lazy way. */
9853 uint32_t *pu32Dst;
9854 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9855 if (rc == VINF_SUCCESS)
9856 {
9857 *pu32Dst = u32Value;
9858 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9859 }
9860
9861 /* Commit the new RSP value unless an access handler made trouble. */
9862 if (rc == VINF_SUCCESS)
9863 *pTmpRsp = NewRsp;
9864
9865 return rc;
9866}
9867
9868
9869/**
9870 * Pushes a qword onto the stack, using a temporary stack pointer.
9871 *
9872 * @returns Strict VBox status code.
9873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9874 * @param u64Value The value to push.
9875 * @param pTmpRsp Pointer to the temporary stack pointer.
9876 */
9877IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9878{
9879 /* Decrement the stack pointer. */
9880 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9881 RTUINT64U NewRsp = *pTmpRsp;
9882 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9883
9884 /* Write the qword the lazy way. */
9885 uint64_t *pu64Dst;
9886 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9887 if (rc == VINF_SUCCESS)
9888 {
9889 *pu64Dst = u64Value;
9890 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9891 }
9892
9893 /* Commit the new RSP value unless an access handler made trouble. */
9894 if (rc == VINF_SUCCESS)
9895 *pTmpRsp = NewRsp;
9896
9897 return rc;
9898}
9899
9900
9901/**
9902 * Pops a word from the stack, using a temporary stack pointer.
9903 *
9904 * @returns Strict VBox status code.
9905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9906 * @param pu16Value Where to store the popped value.
9907 * @param pTmpRsp Pointer to the temporary stack pointer.
9908 */
9909IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9910{
9911 /* Increment the stack pointer. */
9912 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9913 RTUINT64U NewRsp = *pTmpRsp;
9914 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9915
9916 /* Read the word the lazy way. */
9917 uint16_t const *pu16Src;
9918 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9919 if (rc == VINF_SUCCESS)
9920 {
9921 *pu16Value = *pu16Src;
9922 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9923
9924 /* Commit the new RSP value. */
9925 if (rc == VINF_SUCCESS)
9926 *pTmpRsp = NewRsp;
9927 }
9928
9929 return rc;
9930}
9931
9932
9933/**
9934 * Pops a dword from the stack, using a temporary stack pointer.
9935 *
9936 * @returns Strict VBox status code.
9937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9938 * @param pu32Value Where to store the popped value.
9939 * @param pTmpRsp Pointer to the temporary stack pointer.
9940 */
9941IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9942{
9943 /* Increment the stack pointer. */
9944 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9945 RTUINT64U NewRsp = *pTmpRsp;
9946 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9947
9948 /* Read the dword the lazy way. */
9949 uint32_t const *pu32Src;
9950 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9951 if (rc == VINF_SUCCESS)
9952 {
9953 *pu32Value = *pu32Src;
9954 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9955
9956 /* Commit the new RSP value. */
9957 if (rc == VINF_SUCCESS)
9958 *pTmpRsp = NewRsp;
9959 }
9960
9961 return rc;
9962}
9963
9964
9965/**
9966 * Pops a qword from the stack, using a temporary stack pointer.
9967 *
9968 * @returns Strict VBox status code.
9969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9970 * @param pu64Value Where to store the popped value.
9971 * @param pTmpRsp Pointer to the temporary stack pointer.
9972 */
9973IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9974{
9975 /* Increment the stack pointer. */
9976 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9977 RTUINT64U NewRsp = *pTmpRsp;
9978 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9979
9980 /* Read the qword the lazy way. */
9981 uint64_t const *pu64Src;
9982 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9983 if (rcStrict == VINF_SUCCESS)
9984 {
9985 *pu64Value = *pu64Src;
9986 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9987
9988 /* Commit the new RSP value. */
9989 if (rcStrict == VINF_SUCCESS)
9990 *pTmpRsp = NewRsp;
9991 }
9992
9993 return rcStrict;
9994}
9995
9996
9997/**
9998 * Begin a special stack push (used by interrupts, exceptions and such).
9999 *
10000 * This will raise \#SS or \#PF if appropriate.
10001 *
10002 * @returns Strict VBox status code.
10003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10004 * @param cbMem The number of bytes to push onto the stack.
10005 * @param ppvMem Where to return the pointer to the stack memory.
10006 * As with the other memory functions this could be
10007 * direct access or bounce buffered access, so
10008 * don't commit register until the commit call
10009 * succeeds.
10010 * @param puNewRsp Where to return the new RSP value. This must be
10011 * passed unchanged to
10012 * iemMemStackPushCommitSpecial().
10013 */
10014IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10015{
10016 Assert(cbMem < UINT8_MAX);
10017 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10018 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10019 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10020}
10021
10022
10023/**
10024 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10025 *
10026 * This will update the rSP.
10027 *
10028 * @returns Strict VBox status code.
10029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10030 * @param pvMem The pointer returned by
10031 * iemMemStackPushBeginSpecial().
10032 * @param uNewRsp The new RSP value returned by
10033 * iemMemStackPushBeginSpecial().
10034 */
10035IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10036{
10037 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10038 if (rcStrict == VINF_SUCCESS)
10039 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10040 return rcStrict;
10041}
10042
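/* A minimal sketch of the begin/commit protocol (illustrative only; the 6-byte
   frame and the u16Ip/u16Cs/u16Flags values are assumptions, not taken from a
   specific exception path). No guest register may be updated until the commit
   call has succeeded.

    uint64_t     uNewRsp;
    uint16_t    *pu16Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pu16Frame[2] = u16Flags;
    pu16Frame[1] = u16Cs;
    pu16Frame[0] = u16Ip;
    rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
*/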
10043
10044/**
10045 * Begin a special stack pop (used by iret, retf and such).
10046 *
10047 * This will raise \#SS or \#PF if appropriate.
10048 *
10049 * @returns Strict VBox status code.
10050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10051 * @param cbMem The number of bytes to pop from the stack.
10052 * @param ppvMem Where to return the pointer to the stack memory.
10053 * @param puNewRsp Where to return the new RSP value. This must be
10054 * assigned to CPUMCTX::rsp manually some time
10055 * after iemMemStackPopDoneSpecial() has been
10056 * called.
10057 */
10058IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10059{
10060 Assert(cbMem < UINT8_MAX);
10061 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10062 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10063 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10064}
10065
10066
10067/**
10068 * Continue a special stack pop (used by iret and retf).
10069 *
10070 * This will raise \#SS or \#PF if appropriate.
10071 *
10072 * @returns Strict VBox status code.
10073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10074 * @param cbMem The number of bytes to pop from the stack.
10075 * @param ppvMem Where to return the pointer to the stack memory.
10076 * @param puNewRsp Where to return the new RSP value. This must be
10077 * assigned to CPUMCTX::rsp manually some time
10078 * after iemMemStackPopDoneSpecial() has been
10079 * called.
10080 */
10081IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10082{
10083 Assert(cbMem < UINT8_MAX);
10084 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10085 RTUINT64U NewRsp;
10086 NewRsp.u = *puNewRsp;
10087 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10088 *puNewRsp = NewRsp.u;
10089 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10090}
10091
10092
10093/**
10094 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10095 * iemMemStackPopContinueSpecial).
10096 *
10097 * The caller will manually commit the rSP.
10098 *
10099 * @returns Strict VBox status code.
10100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10101 * @param pvMem The pointer returned by
10102 * iemMemStackPopBeginSpecial() or
10103 * iemMemStackPopContinueSpecial().
10104 */
10105IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10106{
10107 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10108}
10109
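/* A minimal sketch of the special-pop protocol (illustrative only; the 12-byte
   frame layout is an assumption). Unlike the plain pop helpers, RSP must be
   committed manually after the done call.

    uint64_t        uNewRsp;
    uint32_t const *pu32Frame;
    VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, (void const **)&pu32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t const uNewEip   = pu32Frame[0];
    uint32_t const uNewCs    = pu32Frame[1];
    uint32_t const uNewFlags = pu32Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu32Frame);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    IEM_GET_CTX(pVCpu)->rsp = uNewRsp; // the manual RSP commit mentioned above
*/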
10110
10111/**
10112 * Fetches a system table byte.
10113 *
10114 * @returns Strict VBox status code.
10115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10116 * @param pbDst Where to return the byte.
10117 * @param iSegReg The index of the segment register to use for
10118 * this access. The base and limits are checked.
10119 * @param GCPtrMem The address of the guest memory.
10120 */
10121IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10122{
10123 /* The lazy approach for now... */
10124 uint8_t const *pbSrc;
10125 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10126 if (rc == VINF_SUCCESS)
10127 {
10128 *pbDst = *pbSrc;
10129 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10130 }
10131 return rc;
10132}
10133
10134
10135/**
10136 * Fetches a system table word.
10137 *
10138 * @returns Strict VBox status code.
10139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10140 * @param pu16Dst Where to return the word.
10141 * @param iSegReg The index of the segment register to use for
10142 * this access. The base and limits are checked.
10143 * @param GCPtrMem The address of the guest memory.
10144 */
10145IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10146{
10147 /* The lazy approach for now... */
10148 uint16_t const *pu16Src;
10149 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10150 if (rc == VINF_SUCCESS)
10151 {
10152 *pu16Dst = *pu16Src;
10153 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10154 }
10155 return rc;
10156}
10157
10158
10159/**
10160 * Fetches a system table dword.
10161 *
10162 * @returns Strict VBox status code.
10163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10164 * @param pu32Dst Where to return the dword.
10165 * @param iSegReg The index of the segment register to use for
10166 * this access. The base and limits are checked.
10167 * @param GCPtrMem The address of the guest memory.
10168 */
10169IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10170{
10171 /* The lazy approach for now... */
10172 uint32_t const *pu32Src;
10173 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10174 if (rc == VINF_SUCCESS)
10175 {
10176 *pu32Dst = *pu32Src;
10177 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10178 }
10179 return rc;
10180}
10181
10182
10183/**
10184 * Fetches a system table qword.
10185 *
10186 * @returns Strict VBox status code.
10187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10188 * @param pu64Dst Where to return the qword.
10189 * @param iSegReg The index of the segment register to use for
10190 * this access. The base and limits are checked.
10191 * @param GCPtrMem The address of the guest memory.
10192 */
10193IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10194{
10195 /* The lazy approach for now... */
10196 uint64_t const *pu64Src;
10197 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10198 if (rc == VINF_SUCCESS)
10199 {
10200 *pu64Dst = *pu64Src;
10201 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10202 }
10203 return rc;
10204}
10205
10206
10207/**
10208 * Fetches a descriptor table entry with caller specified error code.
10209 *
10210 * @returns Strict VBox status code.
10211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10212 * @param pDesc Where to return the descriptor table entry.
10213 * @param uSel The selector which table entry to fetch.
10214 * @param uXcpt The exception to raise on table lookup error.
10215 * @param uErrorCode The error code associated with the exception.
10216 */
10217IEM_STATIC VBOXSTRICTRC
10218iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10219{
10220 AssertPtr(pDesc);
10221 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10222
10223 /** @todo did the 286 require all 8 bytes to be accessible? */
10224 /*
10225 * Get the selector table base and check bounds.
10226 */
10227 RTGCPTR GCPtrBase;
10228 if (uSel & X86_SEL_LDT)
10229 {
10230 if ( !pCtx->ldtr.Attr.n.u1Present
10231 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10232 {
10233 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10234 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10235 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10236 uErrorCode, 0);
10237 }
10238
10239 Assert(pCtx->ldtr.Attr.n.u1Present);
10240 GCPtrBase = pCtx->ldtr.u64Base;
10241 }
10242 else
10243 {
10244 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10245 {
10246 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10247 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10248 uErrorCode, 0);
10249 }
10250 GCPtrBase = pCtx->gdtr.pGdt;
10251 }
10252
10253 /*
10254 * Read the legacy descriptor and maybe the long mode extensions if
10255 * required.
10256 */
10257 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10258 if (rcStrict == VINF_SUCCESS)
10259 {
10260 if ( !IEM_IS_LONG_MODE(pVCpu)
10261 || pDesc->Legacy.Gen.u1DescType)
10262 pDesc->Long.au64[1] = 0;
10263 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10264 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10265 else
10266 {
10267 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10268 /** @todo is this the right exception? */
10269 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10270 }
10271 }
10272 return rcStrict;
10273}
10274
10275
10276/**
10277 * Fetches a descriptor table entry.
10278 *
10279 * @returns Strict VBox status code.
10280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10281 * @param pDesc Where to return the descriptor table entry.
10282 * @param uSel The selector which table entry to fetch.
10283 * @param uXcpt The exception to raise on table lookup error.
10284 */
10285IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10286{
10287 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10288}
10289
10290
10291/**
10292 * Fakes a long mode stack selector for SS = 0.
10293 *
10294 * @param pDescSs Where to return the fake stack descriptor.
10295 * @param uDpl The DPL we want.
10296 */
10297IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10298{
10299 pDescSs->Long.au64[0] = 0;
10300 pDescSs->Long.au64[1] = 0;
10301 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10302 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10303 pDescSs->Long.Gen.u2Dpl = uDpl;
10304 pDescSs->Long.Gen.u1Present = 1;
10305 pDescSs->Long.Gen.u1Long = 1;
10306}
10307
10308
10309/**
10310 * Marks the selector descriptor as accessed (only non-system descriptors).
10311 *
10312 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10313 * will therefore skip the limit checks.
10314 *
10315 * @returns Strict VBox status code.
10316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10317 * @param uSel The selector.
10318 */
10319IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10320{
10321 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10322
10323 /*
10324 * Get the selector table base and calculate the entry address.
10325 */
10326 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10327 ? pCtx->ldtr.u64Base
10328 : pCtx->gdtr.pGdt;
10329 GCPtr += uSel & X86_SEL_MASK;
10330
10331 /*
10332 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10333 * ugly stuff to avoid this. This will make sure it's an atomic access
10334 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10335 */
10336 VBOXSTRICTRC rcStrict;
10337 uint32_t volatile *pu32;
10338 if ((GCPtr & 3) == 0)
10339 {
10340 /* The normal case, map the 32-bit bits around the accessed bit (40). */
10341 GCPtr += 2 + 2;
10342 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10343 if (rcStrict != VINF_SUCCESS)
10344 return rcStrict;
10345 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10346 }
10347 else
10348 {
10349 /* The misaligned GDT/LDT case, map the whole thing. */
10350 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10351 if (rcStrict != VINF_SUCCESS)
10352 return rcStrict;
10353 switch ((uintptr_t)pu32 & 3)
10354 {
10355 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10356 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10357 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10358 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10359 }
10360 }
10361
10362 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10363}
10364
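/* A minimal sketch of how the descriptor helpers above combine (illustrative
   only; a real segment register load performs far more validation in between):

    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    // ... check the present bit, DPL/RPL, type and limit here ...
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
*/
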
10365/** @} */
10366
10367
10368/*
10369 * Include the C/C++ implementation of instruction.
10370 */
10371#include "IEMAllCImpl.cpp.h"
10372
10373
10374
10375/** @name "Microcode" macros.
10376 *
10377 * The idea is that we should be able to use the same code to interpret
10378 * instructions as well as to recompile them. Thus this obfuscation.
10379 *
10380 * @{
10381 */
10382#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10383#define IEM_MC_END() }
10384#define IEM_MC_PAUSE() do {} while (0)
10385#define IEM_MC_CONTINUE() do {} while (0)
10386
10387/** Internal macro. */
10388#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10389 do \
10390 { \
10391 VBOXSTRICTRC rcStrict2 = a_Expr; \
10392 if (rcStrict2 != VINF_SUCCESS) \
10393 return rcStrict2; \
10394 } while (0)
10395
10396
10397#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10398#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10399#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10400#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10401#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10402#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10403#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10404#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10405#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10406 do { \
10407 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10408 return iemRaiseDeviceNotAvailable(pVCpu); \
10409 } while (0)
10410#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10411 do { \
10412 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10413 return iemRaiseMathFault(pVCpu); \
10414 } while (0)
10415#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10416 do { \
10417 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10418 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10419 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10420 return iemRaiseUndefinedOpcode(pVCpu); \
10421 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10422 return iemRaiseDeviceNotAvailable(pVCpu); \
10423 } while (0)
10424#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10425 do { \
10426 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10427 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10428 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10429 return iemRaiseUndefinedOpcode(pVCpu); \
10430 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10431 return iemRaiseDeviceNotAvailable(pVCpu); \
10432 } while (0)
10433#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10434 do { \
10435 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10436 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10437 return iemRaiseUndefinedOpcode(pVCpu); \
10438 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10439 return iemRaiseDeviceNotAvailable(pVCpu); \
10440 } while (0)
10441#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10442 do { \
10443 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10444 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10445 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10446 return iemRaiseUndefinedOpcode(pVCpu); \
10447 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10448 return iemRaiseDeviceNotAvailable(pVCpu); \
10449 } while (0)
10450#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10451 do { \
10452 if (pVCpu->iem.s.uCpl != 0) \
10453 return iemRaiseGeneralProtectionFault0(pVCpu); \
10454 } while (0)
10455
10456
10457#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10458#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10459#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10460#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10461#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10462#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10463#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10464 uint32_t a_Name; \
10465 uint32_t *a_pName = &a_Name
10466#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10467 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10468
10469#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10470#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10471
10472#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10473#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10474#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10475#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10476#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10477#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10478#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10479#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10480#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10481#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10482#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10483#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10484#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10485#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10486#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10487#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10488#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10489#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10490#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10491#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10492#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10493#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10494#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10495#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10496#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10497#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10498#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10499#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10500#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10501/** @note Not for IOPL or IF testing or modification. */
10502#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10503#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10504#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10505#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10506
10507#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10508#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10509#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10510#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10511#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10512#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10513#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10514#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10515#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10516#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10517#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10518 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10519
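/* A minimal sketch of how these "microcode" macros are combined in the
   instruction decoder functions (illustrative only; iGRegDst and iGRegSrc are
   assumed to be register indexes extracted from ModR/M by the caller):

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
    IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
*/
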
10520#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10521#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10522/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10523 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10524#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10525#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10526/** @note Not for IOPL or IF testing or modification. */
10527#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10528
10529#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10530#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10531#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10532 do { \
10533 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10534 *pu32Reg += (a_u32Value); \
10535 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10536 } while (0)
10537#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10538
10539#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10540#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10541#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10542 do { \
10543 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10544 *pu32Reg -= (a_u32Value); \
10545 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10546 } while (0)
10547#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10548#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10549
10550#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10551#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10552#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10553#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10554#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10555#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10556#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10557
10558#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10559#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10560#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10561#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10562
10563#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10564#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10565#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10566
10567#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10568#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10569#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10570
10571#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10572#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10573#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10574
10575#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10576#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10577#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10578
10579#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10580
10581#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10582
10583#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10584#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10585#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10586 do { \
10587 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10588 *pu32Reg &= (a_u32Value); \
10589 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10590 } while (0)
10591#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10592
10593#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10594#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10595#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10596 do { \
10597 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10598 *pu32Reg |= (a_u32Value); \
10599 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10600 } while (0)
10601#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10602
10603
10604/** @note Not for IOPL or IF modification. */
10605#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10606/** @note Not for IOPL or IF modification. */
10607#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10608/** @note Not for IOPL or IF modification. */
10609#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10610
10611#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10612
10613
10614#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10615 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10616#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10617 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10618#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10619 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10620#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10621 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10622#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10623 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10624#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10625 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10626#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10627 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10628
10629#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10630 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10631#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10632 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10633#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10634 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10635#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10636 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10637#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10638 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10639#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10640 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10641 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10642 } while (0)
10643#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10644 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10645 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10646 } while (0)
10647#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10648 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10649#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10650 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10651#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10652 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10653#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10654 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10655 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10656
10657#ifndef IEM_WITH_SETJMP
10658# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10660# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10662# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10664#else
10665# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10666 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10667# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10668 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10669# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10670 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10671#endif
10672
10673#ifndef IEM_WITH_SETJMP
10674# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10676# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10678# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10680#else
10681# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10682 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10683# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10684 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10685# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10686 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10687#endif
10688
10689#ifndef IEM_WITH_SETJMP
10690# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10692# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10694# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10696#else
10697# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10698 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10699# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10700 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10701# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10702 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10703#endif
10704
10705#ifdef SOME_UNUSED_FUNCTION
10706# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10708#endif
10709
10710#ifndef IEM_WITH_SETJMP
10711# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10713# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10715# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10717# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10719#else
10720# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10721 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10722# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10723 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10724# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10725 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10726# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10727 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10728#endif
10729
10730#ifndef IEM_WITH_SETJMP
10731# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10732 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10733# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10735# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10737#else
10738# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10739 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10740# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10741 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10742# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10743 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10744#endif
10745
10746#ifndef IEM_WITH_SETJMP
10747# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10749# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10751#else
10752# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10753 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10754# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10755 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10756#endif
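
/*
 * Usage sketch (illustrative only, not part of the build): the fetch wrappers
 * above hide the difference between the two build flavours.  Without
 * IEM_WITH_SETJMP the iemMemFetchData* worker returns a strict status code and
 * the macro bails out of the calling opcode function via
 * IEM_MC_RETURN_ON_FAILURE; with IEM_WITH_SETJMP the *Jmp worker longjmps on a
 * fault, so the macro reduces to a plain assignment.  Either way a caller
 * (GCPtrMem here standing in for an already calculated effective address)
 * simply writes:
 *
 *      uint32_t u32Value;
 *      IEM_MC_FETCH_MEM_U32(u32Value, X86_SREG_DS, GCPtrMem);
 *      // u32Value is valid past this point in both flavours; on a fault the
 *      // macro has already returned or longjmp'ed out of the function.
 */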
10757
10758
10759
10760#ifndef IEM_WITH_SETJMP
10761# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10762 do { \
10763 uint8_t u8Tmp; \
10764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10765 (a_u16Dst) = u8Tmp; \
10766 } while (0)
10767# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10768 do { \
10769 uint8_t u8Tmp; \
10770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10771 (a_u32Dst) = u8Tmp; \
10772 } while (0)
10773# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10774 do { \
10775 uint8_t u8Tmp; \
10776 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10777 (a_u64Dst) = u8Tmp; \
10778 } while (0)
10779# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10780 do { \
10781 uint16_t u16Tmp; \
10782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10783 (a_u32Dst) = u16Tmp; \
10784 } while (0)
10785# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10786 do { \
10787 uint16_t u16Tmp; \
10788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10789 (a_u64Dst) = u16Tmp; \
10790 } while (0)
10791# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10792 do { \
10793 uint32_t u32Tmp; \
10794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10795 (a_u64Dst) = u32Tmp; \
10796 } while (0)
10797#else /* IEM_WITH_SETJMP */
10798# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10799 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10800# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10801 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10802# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10803 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10804# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10805 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10806# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10807 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10808# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10809 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10810#endif /* IEM_WITH_SETJMP */
10811
10812#ifndef IEM_WITH_SETJMP
10813# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10814 do { \
10815 uint8_t u8Tmp; \
10816 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10817 (a_u16Dst) = (int8_t)u8Tmp; \
10818 } while (0)
10819# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10820 do { \
10821 uint8_t u8Tmp; \
10822 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10823 (a_u32Dst) = (int8_t)u8Tmp; \
10824 } while (0)
10825# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10826 do { \
10827 uint8_t u8Tmp; \
10828 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10829 (a_u64Dst) = (int8_t)u8Tmp; \
10830 } while (0)
10831# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10832 do { \
10833 uint16_t u16Tmp; \
10834 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10835 (a_u32Dst) = (int16_t)u16Tmp; \
10836 } while (0)
10837# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10838 do { \
10839 uint16_t u16Tmp; \
10840 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10841 (a_u64Dst) = (int16_t)u16Tmp; \
10842 } while (0)
10843# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10844 do { \
10845 uint32_t u32Tmp; \
10846 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10847 (a_u64Dst) = (int32_t)u32Tmp; \
10848 } while (0)
10849#else /* IEM_WITH_SETJMP */
10850# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10851 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10852# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10853 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10854# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10855 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10856# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10857 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10858# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10859 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10860# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10861 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10862#endif /* IEM_WITH_SETJMP */
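
/*
 * Worked example (illustrative only): the _ZX_ variants zero-extend while the
 * _SX_ variants sign-extend through an intermediate signed cast.  Fetching the
 * byte 0x80 from the same address therefore yields:
 *
 *      uint32_t u32Zx, u32Sx;
 *      IEM_MC_FETCH_MEM_U8_ZX_U32(u32Zx, pVCpu->iem.s.iEffSeg, GCPtrMem);  // u32Zx == 0x00000080
 *      IEM_MC_FETCH_MEM_U8_SX_U32(u32Sx, pVCpu->iem.s.iEffSeg, GCPtrMem);  // u32Sx == 0xffffff80
 *
 * i.e. (uint32_t)(int8_t)0x80 == 0xffffff80, matching MOVZX vs MOVSX semantics.
 */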
10863
10864#ifndef IEM_WITH_SETJMP
10865# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10866 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10867# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10868 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10869# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10870 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10871# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10872 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10873#else
10874# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10875 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10876# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10877 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10878# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10879 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10880# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10881 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10882#endif
10883
10884#ifndef IEM_WITH_SETJMP
10885# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10886 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10887# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10888 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10889# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10890 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10891# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10892 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10893#else
10894# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10895 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10896# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10897 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10898# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10899 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10900# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10901 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10902#endif
10903
10904#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10905#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10906#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10907#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10908#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10909#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10910#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10911 do { \
10912 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10913 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10914 } while (0)
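
/*
 * Bit-pattern breakdown (informational): the NEG_QNAN stores above write the
 * standard "floating-point indefinite" QNaN encodings:
 *      R32: 0xffc00000              - sign=1, exponent=0xff,   quiet bit set.
 *      R64: 0xfff8000000000000      - sign=1, exponent=0x7ff,  quiet bit set.
 *      R80: 0xffff:c000000000000000 - sign=1, exponent=0x7fff, explicit integer
 *           bit (bit 63) and quiet bit (bit 62) set.
 * These are the values the FPU itself produces for masked invalid operations.
 */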
10915
10916#ifndef IEM_WITH_SETJMP
10917# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10918 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10919# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10920 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10921#else
10922# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10923 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10924# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10925 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10926#endif
10927
10928
10929#define IEM_MC_PUSH_U16(a_u16Value) \
10930 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10931#define IEM_MC_PUSH_U32(a_u32Value) \
10932 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10933#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10934 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10935#define IEM_MC_PUSH_U64(a_u64Value) \
10936 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10937
10938#define IEM_MC_POP_U16(a_pu16Value) \
10939 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10940#define IEM_MC_POP_U32(a_pu32Value) \
10941 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10942#define IEM_MC_POP_U64(a_pu64Value) \
10943 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10944
10945/** Maps guest memory for direct or bounce buffered access.
10946 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10947 * @remarks May return.
10948 */
10949#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10950 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10951
10952/** Maps guest memory for direct or bounce buffered access.
10953 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10954 * @remarks May return.
10955 */
10956#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10957 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10958
10959/** Commits the memory and unmaps the guest memory.
10960 * @remarks May return.
10961 */
10962#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10963 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
10964
10965/** Commits the memory and unmaps the guest memory, unless the FPU status word
10966 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
10967 * that would cause FLD not to store.
10968 *
10969 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10970 * store, while \#P will not.
10971 *
10972 * @remarks May in theory return - for now.
10973 */
10974#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10975 do { \
10976 if ( !(a_u16FSW & X86_FSW_ES) \
10977 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
10978 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
10979 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
10980 } while (0)
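
/*
 * Worked example (illustrative only): with a_u16FSW = X86_FSW_ES | X86_FSW_IE
 * (pending invalid-operation) and FCW.IM clear (IE unmasked), the expression
 *      (a_u16FSW & (UE|OE|IE)) & ~(FCW & X86_FCW_MASK_ALL)
 * is non-zero, so the commit is skipped and the FLD-style store never reaches
 * memory.  If IE is masked instead (FCW.IM set), or only a precision fault is
 * pending, the condition lets the commit through as usual.
 */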
10981
10982/** Calculate effective address from R/M. */
10983#ifndef IEM_WITH_SETJMP
10984# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10985 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
10986#else
10987# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10988 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
10989#endif
10990
10991#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
10992#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
10993#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
10994#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
10995#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
10996#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
10997#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
10998
10999/**
11000 * Defers the rest of the instruction emulation to a C implementation routine
11001 * and returns, only taking the standard parameters.
11002 *
11003 * @param a_pfnCImpl The pointer to the C routine.
11004 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11005 */
11006#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11007
11008/**
11009 * Defers the rest of the instruction emulation to a C implementation routine and
11010 * returns, taking one argument in addition to the standard ones.
11011 *
11012 * @param a_pfnCImpl The pointer to the C routine.
11013 * @param a0 The argument.
11014 */
11015#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11016
11017/**
11018 * Defers the rest of the instruction emulation to a C implementation routine
11019 * and returns, taking two arguments in addition to the standard ones.
11020 *
11021 * @param a_pfnCImpl The pointer to the C routine.
11022 * @param a0 The first extra argument.
11023 * @param a1 The second extra argument.
11024 */
11025#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11026
11027/**
11028 * Defers the rest of the instruction emulation to a C implementation routine
11029 * and returns, taking three arguments in addition to the standard ones.
11030 *
11031 * @param a_pfnCImpl The pointer to the C routine.
11032 * @param a0 The first extra argument.
11033 * @param a1 The second extra argument.
11034 * @param a2 The third extra argument.
11035 */
11036#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11037
11038/**
11039 * Defers the rest of the instruction emulation to a C implementation routine
11040 * and returns, taking four arguments in addition to the standard ones.
11041 *
11042 * @param a_pfnCImpl The pointer to the C routine.
11043 * @param a0 The first extra argument.
11044 * @param a1 The second extra argument.
11045 * @param a2 The third extra argument.
11046 * @param a3 The fourth extra argument.
11047 */
11048#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11049
11050/**
11051 * Defers the rest of the instruction emulation to a C implementation routine
11052 * and returns, taking five arguments in addition to the standard ones.
11053 *
11054 * @param a_pfnCImpl The pointer to the C routine.
11055 * @param a0 The first extra argument.
11056 * @param a1 The second extra argument.
11057 * @param a2 The third extra argument.
11058 * @param a3 The fourth extra argument.
11059 * @param a4 The fifth extra argument.
11060 */
11061#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11062
11063/**
11064 * Defers the entire instruction emulation to a C implementation routine and
11065 * returns, only taking the standard parameters.
11066 *
11067 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11068 *
11069 * @param a_pfnCImpl The pointer to the C routine.
11070 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11071 */
11072#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11073
11074/**
11075 * Defers the entire instruction emulation to a C implementation routine and
11076 * returns, taking one argument in addition to the standard ones.
11077 *
11078 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11079 *
11080 * @param a_pfnCImpl The pointer to the C routine.
11081 * @param a0 The argument.
11082 */
11083#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11084
11085/**
11086 * Defers the entire instruction emulation to a C implementation routine and
11087 * returns, taking two arguments in addition to the standard ones.
11088 *
11089 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11090 *
11091 * @param a_pfnCImpl The pointer to the C routine.
11092 * @param a0 The first extra argument.
11093 * @param a1 The second extra argument.
11094 */
11095#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11096
11097/**
11098 * Defers the entire instruction emulation to a C implementation routine and
11099 * returns, taking three arguments in addition to the standard ones.
11100 *
11101 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11102 *
11103 * @param a_pfnCImpl The pointer to the C routine.
11104 * @param a0 The first extra argument.
11105 * @param a1 The second extra argument.
11106 * @param a2 The third extra argument.
11107 */
11108#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11109
11110/**
11111 * Calls a FPU assembly implementation taking one visible argument.
11112 *
11113 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11114 * @param a0 The first extra argument.
11115 */
11116#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11117 do { \
11118 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11119 } while (0)
11120
11121/**
11122 * Calls a FPU assembly implementation taking two visible arguments.
11123 *
11124 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11125 * @param a0 The first extra argument.
11126 * @param a1 The second extra argument.
11127 */
11128#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11129 do { \
11130 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11131 } while (0)
11132
11133/**
11134 * Calls a FPU assembly implementation taking three visible arguments.
11135 *
11136 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11137 * @param a0 The first extra argument.
11138 * @param a1 The second extra argument.
11139 * @param a2 The third extra argument.
11140 */
11141#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11142 do { \
11143 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11144 } while (0)
11145
11146#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11147 do { \
11148 (a_FpuData).FSW = (a_FSW); \
11149 (a_FpuData).r80Result = *(a_pr80Value); \
11150 } while (0)
11151
11152/** Pushes FPU result onto the stack. */
11153#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11154 iemFpuPushResult(pVCpu, &a_FpuData)
11155/** Pushes FPU result onto the stack and sets the FPUDP. */
11156#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11157 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11158
11159/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
11160#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11161 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11162
11163/** Stores FPU result in a stack register. */
11164#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11165 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11166/** Stores FPU result in a stack register and pops the stack. */
11167#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11168 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11169/** Stores FPU result in a stack register and sets the FPUDP. */
11170#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11171 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11172/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11173 * stack. */
11174#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11175 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
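
/*
 * Typical flow (sketch only; IEM_MC_LOCAL, IEMFPURESULT and the worker name
 * below are assumptions based on how the instruction decoders use these
 * wrappers, not definitions made here): a two-operand FADDP-style instruction
 * lets the assembly worker fill in a result struct and then stores and pops in
 * one step:
 *
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, &FpuRes, pr80Dst, pr80Src);
 *      IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, iStReg);
 *
 * The store helper merges FpuRes.FSW into the guest FSW, writes the 80-bit
 * result into the target stack register and adjusts TOP.
 */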
11176
11177/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11178#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11179 iemFpuUpdateOpcodeAndIp(pVCpu)
11180/** Free a stack register (for FFREE and FFREEP). */
11181#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11182 iemFpuStackFree(pVCpu, a_iStReg)
11183/** Increment the FPU stack pointer. */
11184#define IEM_MC_FPU_STACK_INC_TOP() \
11185 iemFpuStackIncTop(pVCpu)
11186/** Decrement the FPU stack pointer. */
11187#define IEM_MC_FPU_STACK_DEC_TOP() \
11188 iemFpuStackDecTop(pVCpu)
11189
11190/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11191#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11192 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11193/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11194#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11195 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11196/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11197#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11198 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11199/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11200#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11201 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11202/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11203 * stack. */
11204#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11205 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11206/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11207#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11208 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11209
11210/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11211#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11212 iemFpuStackUnderflow(pVCpu, a_iStDst)
11213/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11214 * stack. */
11215#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11216 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11217/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11218 * FPUDS. */
11219#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11220 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11221/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11222 * FPUDS. Pops stack. */
11223#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11224 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11225/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11226 * stack twice. */
11227#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11228 iemFpuStackUnderflowThenPopPop(pVCpu)
11229/** Raises a FPU stack underflow exception for an instruction pushing a result
11230 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11231#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11232 iemFpuStackPushUnderflow(pVCpu)
11233/** Raises a FPU stack underflow exception for an instruction pushing a result
11234 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11235#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11236 iemFpuStackPushUnderflowTwo(pVCpu)
11237
11238/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11239 * FPUIP, FPUCS and FOP. */
11240#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11241 iemFpuStackPushOverflow(pVCpu)
11242/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11243 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11244#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11245 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11246/** Prepares for using the FPU state.
11247 * Ensures that we can use the host FPU in the current context (RC+R0).
11248 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11249#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11250/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11251#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11252/** Actualizes the guest FPU state so it can be accessed and modified. */
11253#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11254
11255/** Prepares for using the SSE state.
11256 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11257 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11258#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11259/** Actualizes the guest XMM0..15 register state for read-only access. */
11260#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11261/** Actualizes the guest XMM0..15 register state for read-write access. */
11262#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11263
11264/**
11265 * Calls a MMX assembly implementation taking two visible arguments.
11266 *
11267 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11268 * @param a0 The first extra argument.
11269 * @param a1 The second extra argument.
11270 */
11271#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11272 do { \
11273 IEM_MC_PREPARE_FPU_USAGE(); \
11274 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11275 } while (0)
11276
11277/**
11278 * Calls a MMX assembly implementation taking three visible arguments.
11279 *
11280 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11281 * @param a0 The first extra argument.
11282 * @param a1 The second extra argument.
11283 * @param a2 The third extra argument.
11284 */
11285#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11286 do { \
11287 IEM_MC_PREPARE_FPU_USAGE(); \
11288 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11289 } while (0)
11290
11291
11292/**
11293 * Calls a SSE assembly implementation taking two visible arguments.
11294 *
11295 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11296 * @param a0 The first extra argument.
11297 * @param a1 The second extra argument.
11298 */
11299#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11300 do { \
11301 IEM_MC_PREPARE_SSE_USAGE(); \
11302 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11303 } while (0)
11304
11305/**
11306 * Calls a SSE assembly implementation taking three visible arguments.
11307 *
11308 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11309 * @param a0 The first extra argument.
11310 * @param a1 The second extra argument.
11311 * @param a2 The third extra argument.
11312 */
11313#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11314 do { \
11315 IEM_MC_PREPARE_SSE_USAGE(); \
11316 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11317 } while (0)
11318
11319/** @note Not for IOPL or IF testing. */
11320#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11321/** @note Not for IOPL or IF testing. */
11322#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11323/** @note Not for IOPL or IF testing. */
11324#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11325/** @note Not for IOPL or IF testing. */
11326#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11327/** @note Not for IOPL or IF testing. */
11328#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11329 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11330 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11331/** @note Not for IOPL or IF testing. */
11332#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11333 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11334 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11335/** @note Not for IOPL or IF testing. */
11336#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11337 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11338 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11339 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11340/** @note Not for IOPL or IF testing. */
11341#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11342 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11343 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11344 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11345#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11346#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11347#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11348/** @note Not for IOPL or IF testing. */
11349#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11350 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11351 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11352/** @note Not for IOPL or IF testing. */
11353#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11354 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11355 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11356/** @note Not for IOPL or IF testing. */
11357#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11358 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11359 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11360/** @note Not for IOPL or IF testing. */
11361#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11362 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11363 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11364/** @note Not for IOPL or IF testing. */
11365#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11366 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11367 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11368/** @note Not for IOPL or IF testing. */
11369#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11370 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11371 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11372#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11373#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11374
11375#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11376 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11377#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11378 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11379#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11380 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11381#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11382 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11383#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11384 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11385#define IEM_MC_IF_FCW_IM() \
11386 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11387
11388#define IEM_MC_ELSE() } else {
11389#define IEM_MC_ENDIF() } do {} while (0)
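
/*
 * Expansion example (illustrative only): the IF/ELSE/ENDIF wrappers above are
 * deliberately thin so an IEM_MC block reads like structured code while
 * expanding to plain C.  A "signed less than" style test such as
 *
 *      IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF)
 *          ...taken path...
 *      IEM_MC_ELSE()
 *          ...not-taken path...
 *      IEM_MC_ENDIF();
 *
 * expands to an ordinary if (!!(eflags & SF) != !!(eflags & OF)) { ... } else
 * { ... } statement, which is exactly the SF != OF condition used by JL/JNGE.
 */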
11390
11391/** @} */
11392
11393
11394/** @name Opcode Debug Helpers.
11395 * @{
11396 */
11397#ifdef VBOX_WITH_STATISTICS
11398# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11399#else
11400# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11401#endif
11402
11403#ifdef DEBUG
11404# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11405 do { \
11406 IEMOP_INC_STATS(a_Stats); \
11407 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11408 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11409 } while (0)
11410#else
11411# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11412#endif
11413
11414/** @} */
11415
11416
11417/** @name Opcode Helpers.
11418 * @{
11419 */
11420
11421#ifdef IN_RING3
11422# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11423 do { \
11424 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11425 else \
11426 { \
11427 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11428 return IEMOP_RAISE_INVALID_OPCODE(); \
11429 } \
11430 } while (0)
11431#else
11432# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11433 do { \
11434 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11435 else return IEMOP_RAISE_INVALID_OPCODE(); \
11436 } while (0)
11437#endif
11438
11439/** The instruction requires a 186 or later. */
11440#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11441# define IEMOP_HLP_MIN_186() do { } while (0)
11442#else
11443# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11444#endif
11445
11446/** The instruction requires a 286 or later. */
11447#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11448# define IEMOP_HLP_MIN_286() do { } while (0)
11449#else
11450# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11451#endif
11452
11453/** The instruction requires a 386 or later. */
11454#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11455# define IEMOP_HLP_MIN_386() do { } while (0)
11456#else
11457# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11458#endif
11459
11460/** The instruction requires a 386 or later if the given expression is true. */
11461#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11462# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11463#else
11464# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11465#endif
11466
11467/** The instruction requires a 486 or later. */
11468#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11469# define IEMOP_HLP_MIN_486() do { } while (0)
11470#else
11471# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11472#endif
11473
11474/** The instruction requires a Pentium (586) or later. */
11475#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11476# define IEMOP_HLP_MIN_586() do { } while (0)
11477#else
11478# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11479#endif
11480
11481/** The instruction requires a PentiumPro (686) or later. */
11482#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11483# define IEMOP_HLP_MIN_686() do { } while (0)
11484#else
11485# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11486#endif
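
/*
 * Behaviour sketch (illustrative only): when the build's target CPU already
 * satisfies the requirement, the IEMOP_HLP_MIN_* helper compiles to nothing;
 * otherwise it falls back to the runtime check in IEMOP_HLP_MIN_CPU, e.g.
 *
 *      IEMOP_HLP_MIN_486();   // no-op if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486,
 *                             // otherwise raises #UD when the configured guest
 *                             // CPU is older than a 486.
 *
 * This lets the same opcode decoders serve both fixed-target and dynamic-target
 * builds without scattering explicit CPU checks around.
 */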
11487
11488
11489/** The instruction raises an \#UD in real and V8086 mode. */
11490#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11491 do \
11492 { \
11493 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11494 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11495 } while (0)
11496
11497/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11498 * 64-bit mode. */
11499#define IEMOP_HLP_NO_64BIT() \
11500 do \
11501 { \
11502 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11503 return IEMOP_RAISE_INVALID_OPCODE(); \
11504 } while (0)
11505
11506/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11507 * 64-bit mode. */
11508#define IEMOP_HLP_ONLY_64BIT() \
11509 do \
11510 { \
11511 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11512 return IEMOP_RAISE_INVALID_OPCODE(); \
11513 } while (0)
11514
11515/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11516#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11517 do \
11518 { \
11519 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11520 iemRecalEffOpSize64Default(pVCpu); \
11521 } while (0)
11522
11523/** The instruction has 64-bit operand size if 64-bit mode. */
11524#define IEMOP_HLP_64BIT_OP_SIZE() \
11525 do \
11526 { \
11527 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11528 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11529 } while (0)
11530
11531/** Only a REX prefix immediately preceding the first opcode byte takes
11532 * effect. This macro helps ensure this as well as log bad guest code. */
11533#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11534 do \
11535 { \
11536 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11537 { \
11538 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11539 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11540 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11541 pVCpu->iem.s.uRexB = 0; \
11542 pVCpu->iem.s.uRexIndex = 0; \
11543 pVCpu->iem.s.uRexReg = 0; \
11544 iemRecalEffOpSize(pVCpu); \
11545 } \
11546 } while (0)
11547
11548/**
11549 * Done decoding.
11550 */
11551#define IEMOP_HLP_DONE_DECODING() \
11552 do \
11553 { \
11554 /*nothing for now, maybe later... */ \
11555 } while (0)
11556
11557/**
11558 * Done decoding, raise \#UD exception if lock prefix present.
11559 */
11560#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11561 do \
11562 { \
11563 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11564 { /* likely */ } \
11565 else \
11566 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11567 } while (0)
11568#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11569 do \
11570 { \
11571 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11572 { /* likely */ } \
11573 else \
11574 { \
11575 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11576 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11577 } \
11578 } while (0)
11579#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11580 do \
11581 { \
11582 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11583 { /* likely */ } \
11584 else \
11585 { \
11586 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11587 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11588 } \
11589 } while (0)
11590
11591/**
11592 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11593 * are present.
11594 */
11595#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11596 do \
11597 { \
11598 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11599 { /* likely */ } \
11600 else \
11601 return IEMOP_RAISE_INVALID_OPCODE(); \
11602 } while (0)
11603
11604
11605/**
11606 * Calculates the effective address of a ModR/M memory operand.
11607 *
11608 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11609 *
11610 * @return Strict VBox status code.
11611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11612 * @param bRm The ModRM byte.
11613 * @param cbImm The size of any immediate following the
11614 * effective address opcode bytes. Important for
11615 * RIP relative addressing.
11616 * @param pGCPtrEff Where to return the effective address.
11617 */
11618IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11619{
11620 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11621 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11622# define SET_SS_DEF() \
11623 do \
11624 { \
11625 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11626 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11627 } while (0)
11628
11629 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11630 {
11631/** @todo Check the effective address size crap! */
11632 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11633 {
11634 uint16_t u16EffAddr;
11635
11636 /* Handle the disp16 form with no registers first. */
11637 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11638 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11639 else
11640 {
11641 /* Get the displacement. */
11642 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11643 {
11644 case 0: u16EffAddr = 0; break;
11645 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11646 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11647 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11648 }
11649
11650 /* Add the base and index registers to the disp. */
11651 switch (bRm & X86_MODRM_RM_MASK)
11652 {
11653 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11654 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11655 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11656 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11657 case 4: u16EffAddr += pCtx->si; break;
11658 case 5: u16EffAddr += pCtx->di; break;
11659 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11660 case 7: u16EffAddr += pCtx->bx; break;
11661 }
11662 }
11663
11664 *pGCPtrEff = u16EffAddr;
11665 }
11666 else
11667 {
11668 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11669 uint32_t u32EffAddr;
11670
11671 /* Handle the disp32 form with no registers first. */
11672 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11673 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11674 else
11675 {
11676 /* Get the register (or SIB) value. */
11677 switch ((bRm & X86_MODRM_RM_MASK))
11678 {
11679 case 0: u32EffAddr = pCtx->eax; break;
11680 case 1: u32EffAddr = pCtx->ecx; break;
11681 case 2: u32EffAddr = pCtx->edx; break;
11682 case 3: u32EffAddr = pCtx->ebx; break;
11683 case 4: /* SIB */
11684 {
11685 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11686
11687 /* Get the index and scale it. */
11688 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11689 {
11690 case 0: u32EffAddr = pCtx->eax; break;
11691 case 1: u32EffAddr = pCtx->ecx; break;
11692 case 2: u32EffAddr = pCtx->edx; break;
11693 case 3: u32EffAddr = pCtx->ebx; break;
11694 case 4: u32EffAddr = 0; /*none */ break;
11695 case 5: u32EffAddr = pCtx->ebp; break;
11696 case 6: u32EffAddr = pCtx->esi; break;
11697 case 7: u32EffAddr = pCtx->edi; break;
11698 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11699 }
11700 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11701
11702 /* add base */
11703 switch (bSib & X86_SIB_BASE_MASK)
11704 {
11705 case 0: u32EffAddr += pCtx->eax; break;
11706 case 1: u32EffAddr += pCtx->ecx; break;
11707 case 2: u32EffAddr += pCtx->edx; break;
11708 case 3: u32EffAddr += pCtx->ebx; break;
11709 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11710 case 5:
11711 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11712 {
11713 u32EffAddr += pCtx->ebp;
11714 SET_SS_DEF();
11715 }
11716 else
11717 {
11718 uint32_t u32Disp;
11719 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11720 u32EffAddr += u32Disp;
11721 }
11722 break;
11723 case 6: u32EffAddr += pCtx->esi; break;
11724 case 7: u32EffAddr += pCtx->edi; break;
11725 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11726 }
11727 break;
11728 }
11729 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11730 case 6: u32EffAddr = pCtx->esi; break;
11731 case 7: u32EffAddr = pCtx->edi; break;
11732 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11733 }
11734
11735 /* Get and add the displacement. */
11736 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11737 {
11738 case 0:
11739 break;
11740 case 1:
11741 {
11742 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11743 u32EffAddr += i8Disp;
11744 break;
11745 }
11746 case 2:
11747 {
11748 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11749 u32EffAddr += u32Disp;
11750 break;
11751 }
11752 default:
11753 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11754 }
11755
11756 }
11757 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11758 *pGCPtrEff = u32EffAddr;
11759 else
11760 {
11761 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11762 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11763 }
11764 }
11765 }
11766 else
11767 {
11768 uint64_t u64EffAddr;
11769
11770 /* Handle the rip+disp32 form with no registers first. */
11771 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11772 {
11773 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11774 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11775 }
11776 else
11777 {
11778 /* Get the register (or SIB) value. */
11779 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11780 {
11781 case 0: u64EffAddr = pCtx->rax; break;
11782 case 1: u64EffAddr = pCtx->rcx; break;
11783 case 2: u64EffAddr = pCtx->rdx; break;
11784 case 3: u64EffAddr = pCtx->rbx; break;
11785 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11786 case 6: u64EffAddr = pCtx->rsi; break;
11787 case 7: u64EffAddr = pCtx->rdi; break;
11788 case 8: u64EffAddr = pCtx->r8; break;
11789 case 9: u64EffAddr = pCtx->r9; break;
11790 case 10: u64EffAddr = pCtx->r10; break;
11791 case 11: u64EffAddr = pCtx->r11; break;
11792 case 13: u64EffAddr = pCtx->r13; break;
11793 case 14: u64EffAddr = pCtx->r14; break;
11794 case 15: u64EffAddr = pCtx->r15; break;
11795 /* SIB */
11796 case 4:
11797 case 12:
11798 {
11799 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11800
11801 /* Get the index and scale it. */
11802 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11803 {
11804 case 0: u64EffAddr = pCtx->rax; break;
11805 case 1: u64EffAddr = pCtx->rcx; break;
11806 case 2: u64EffAddr = pCtx->rdx; break;
11807 case 3: u64EffAddr = pCtx->rbx; break;
11808 case 4: u64EffAddr = 0; /*none */ break;
11809 case 5: u64EffAddr = pCtx->rbp; break;
11810 case 6: u64EffAddr = pCtx->rsi; break;
11811 case 7: u64EffAddr = pCtx->rdi; break;
11812 case 8: u64EffAddr = pCtx->r8; break;
11813 case 9: u64EffAddr = pCtx->r9; break;
11814 case 10: u64EffAddr = pCtx->r10; break;
11815 case 11: u64EffAddr = pCtx->r11; break;
11816 case 12: u64EffAddr = pCtx->r12; break;
11817 case 13: u64EffAddr = pCtx->r13; break;
11818 case 14: u64EffAddr = pCtx->r14; break;
11819 case 15: u64EffAddr = pCtx->r15; break;
11820 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11821 }
11822 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11823
11824 /* add base */
11825 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11826 {
11827 case 0: u64EffAddr += pCtx->rax; break;
11828 case 1: u64EffAddr += pCtx->rcx; break;
11829 case 2: u64EffAddr += pCtx->rdx; break;
11830 case 3: u64EffAddr += pCtx->rbx; break;
11831 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11832 case 6: u64EffAddr += pCtx->rsi; break;
11833 case 7: u64EffAddr += pCtx->rdi; break;
11834 case 8: u64EffAddr += pCtx->r8; break;
11835 case 9: u64EffAddr += pCtx->r9; break;
11836 case 10: u64EffAddr += pCtx->r10; break;
11837 case 11: u64EffAddr += pCtx->r11; break;
11838 case 12: u64EffAddr += pCtx->r12; break;
11839 case 14: u64EffAddr += pCtx->r14; break;
11840 case 15: u64EffAddr += pCtx->r15; break;
11841 /* complicated encodings */
11842 case 5:
11843 case 13:
11844 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11845 {
11846 if (!pVCpu->iem.s.uRexB)
11847 {
11848 u64EffAddr += pCtx->rbp;
11849 SET_SS_DEF();
11850 }
11851 else
11852 u64EffAddr += pCtx->r13;
11853 }
11854 else
11855 {
11856 uint32_t u32Disp;
11857 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11858 u64EffAddr += (int32_t)u32Disp;
11859 }
11860 break;
11861 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11862 }
11863 break;
11864 }
11865 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11866 }
11867
11868 /* Get and add the displacement. */
11869 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11870 {
11871 case 0:
11872 break;
11873 case 1:
11874 {
11875 int8_t i8Disp;
11876 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11877 u64EffAddr += i8Disp;
11878 break;
11879 }
11880 case 2:
11881 {
11882 uint32_t u32Disp;
11883 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11884 u64EffAddr += (int32_t)u32Disp;
11885 break;
11886 }
11887 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11888 }
11889
11890 }
11891
11892 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11893 *pGCPtrEff = u64EffAddr;
11894 else
11895 {
11896 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11897 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11898 }
11899 }
11900
11901 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11902 return VINF_SUCCESS;
11903}
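
/*
 * Worked example (illustrative only) for the 32-bit path above: bRm = 0x44
 * (mod=01, reg=000, rm=100) selects the SIB form with a disp8.  With a SIB byte
 * of 0x88 (scale field=2, index=001/ECX, base=000/EAX) and a disp8 of 0x10 the
 * routine computes
 *
 *      GCPtrEff = EAX + (ECX << 2) + 0x10
 *
 * and leaves the default segment at DS, since neither an EBP nor an ESP base is
 * involved and SET_SS_DEF() is never invoked.
 */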
11904
11905
11906/**
11907 * Calculates the effective address of a ModR/M memory operand.
11908 *
11909 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11910 *
11911 * @return Strict VBox status code.
11912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11913 * @param bRm The ModRM byte.
11914 * @param cbImm The size of any immediate following the
11915 * effective address opcode bytes. Important for
11916 * RIP relative addressing.
11917 * @param pGCPtrEff Where to return the effective address.
11918 * @param offRsp RSP displacement.
11919 */
11920IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11921{
11922 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11923 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11924# define SET_SS_DEF() \
11925 do \
11926 { \
11927 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11928 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11929 } while (0)
11930
11931 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11932 {
11933/** @todo Check the effective address size crap! */
11934 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11935 {
11936 uint16_t u16EffAddr;
11937
11938 /* Handle the disp16 form with no registers first. */
11939 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11940 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11941 else
11942 {
11943 /* Get the displacement. */
11944 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11945 {
11946 case 0: u16EffAddr = 0; break;
11947 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11948 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11949 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11950 }
11951
11952 /* Add the base and index registers to the disp. */
11953 switch (bRm & X86_MODRM_RM_MASK)
11954 {
11955 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11956 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11957 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11958 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11959 case 4: u16EffAddr += pCtx->si; break;
11960 case 5: u16EffAddr += pCtx->di; break;
11961 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11962 case 7: u16EffAddr += pCtx->bx; break;
11963 }
11964 }
11965
11966 *pGCPtrEff = u16EffAddr;
11967 }
11968 else
11969 {
11970 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11971 uint32_t u32EffAddr;
11972
11973 /* Handle the disp32 form with no registers first. */
11974 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11975 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11976 else
11977 {
11978 /* Get the register (or SIB) value. */
11979 switch ((bRm & X86_MODRM_RM_MASK))
11980 {
11981 case 0: u32EffAddr = pCtx->eax; break;
11982 case 1: u32EffAddr = pCtx->ecx; break;
11983 case 2: u32EffAddr = pCtx->edx; break;
11984 case 3: u32EffAddr = pCtx->ebx; break;
11985 case 4: /* SIB */
11986 {
11987 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11988
11989 /* Get the index and scale it. */
11990 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11991 {
11992 case 0: u32EffAddr = pCtx->eax; break;
11993 case 1: u32EffAddr = pCtx->ecx; break;
11994 case 2: u32EffAddr = pCtx->edx; break;
11995 case 3: u32EffAddr = pCtx->ebx; break;
11996 case 4: u32EffAddr = 0; /*none */ break;
11997 case 5: u32EffAddr = pCtx->ebp; break;
11998 case 6: u32EffAddr = pCtx->esi; break;
11999 case 7: u32EffAddr = pCtx->edi; break;
12000 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12001 }
12002 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12003
12004 /* add base */
12005 switch (bSib & X86_SIB_BASE_MASK)
12006 {
12007 case 0: u32EffAddr += pCtx->eax; break;
12008 case 1: u32EffAddr += pCtx->ecx; break;
12009 case 2: u32EffAddr += pCtx->edx; break;
12010 case 3: u32EffAddr += pCtx->ebx; break;
12011 case 4:
12012 u32EffAddr += pCtx->esp + offRsp;
12013 SET_SS_DEF();
12014 break;
12015 case 5:
12016 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12017 {
12018 u32EffAddr += pCtx->ebp;
12019 SET_SS_DEF();
12020 }
12021 else
12022 {
12023 uint32_t u32Disp;
12024 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12025 u32EffAddr += u32Disp;
12026 }
12027 break;
12028 case 6: u32EffAddr += pCtx->esi; break;
12029 case 7: u32EffAddr += pCtx->edi; break;
12030 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12031 }
12032 break;
12033 }
12034 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12035 case 6: u32EffAddr = pCtx->esi; break;
12036 case 7: u32EffAddr = pCtx->edi; break;
12037 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12038 }
12039
12040 /* Get and add the displacement. */
12041 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12042 {
12043 case 0:
12044 break;
12045 case 1:
12046 {
12047 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12048 u32EffAddr += i8Disp;
12049 break;
12050 }
12051 case 2:
12052 {
12053 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12054 u32EffAddr += u32Disp;
12055 break;
12056 }
12057 default:
12058 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12059 }
12060
12061 }
12062 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12063 *pGCPtrEff = u32EffAddr;
12064 else
12065 {
12066 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12067 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12068 }
12069 }
12070 }
12071 else
12072 {
12073 uint64_t u64EffAddr;
12074
12075 /* Handle the rip+disp32 form with no registers first. */
12076 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12077 {
12078 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
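            /* RIP relative: the disp32 is relative to the first byte of the *next*
               instruction, so the instruction length decoded so far and the size of any
               trailing immediate (cbImm) must be added on top of the current RIP. */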
12079 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12080 }
12081 else
12082 {
12083 /* Get the register (or SIB) value. */
12084 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12085 {
12086 case 0: u64EffAddr = pCtx->rax; break;
12087 case 1: u64EffAddr = pCtx->rcx; break;
12088 case 2: u64EffAddr = pCtx->rdx; break;
12089 case 3: u64EffAddr = pCtx->rbx; break;
12090 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12091 case 6: u64EffAddr = pCtx->rsi; break;
12092 case 7: u64EffAddr = pCtx->rdi; break;
12093 case 8: u64EffAddr = pCtx->r8; break;
12094 case 9: u64EffAddr = pCtx->r9; break;
12095 case 10: u64EffAddr = pCtx->r10; break;
12096 case 11: u64EffAddr = pCtx->r11; break;
12097 case 13: u64EffAddr = pCtx->r13; break;
12098 case 14: u64EffAddr = pCtx->r14; break;
12099 case 15: u64EffAddr = pCtx->r15; break;
12100 /* SIB */
12101 case 4:
12102 case 12:
12103 {
12104 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12105
12106 /* Get the index and scale it. */
12107 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12108 {
12109 case 0: u64EffAddr = pCtx->rax; break;
12110 case 1: u64EffAddr = pCtx->rcx; break;
12111 case 2: u64EffAddr = pCtx->rdx; break;
12112 case 3: u64EffAddr = pCtx->rbx; break;
12113 case 4: u64EffAddr = 0; /*none */ break;
12114 case 5: u64EffAddr = pCtx->rbp; break;
12115 case 6: u64EffAddr = pCtx->rsi; break;
12116 case 7: u64EffAddr = pCtx->rdi; break;
12117 case 8: u64EffAddr = pCtx->r8; break;
12118 case 9: u64EffAddr = pCtx->r9; break;
12119 case 10: u64EffAddr = pCtx->r10; break;
12120 case 11: u64EffAddr = pCtx->r11; break;
12121 case 12: u64EffAddr = pCtx->r12; break;
12122 case 13: u64EffAddr = pCtx->r13; break;
12123 case 14: u64EffAddr = pCtx->r14; break;
12124 case 15: u64EffAddr = pCtx->r15; break;
12125 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12126 }
12127 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12128
12129 /* add base */
12130 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12131 {
12132 case 0: u64EffAddr += pCtx->rax; break;
12133 case 1: u64EffAddr += pCtx->rcx; break;
12134 case 2: u64EffAddr += pCtx->rdx; break;
12135 case 3: u64EffAddr += pCtx->rbx; break;
12136 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12137 case 6: u64EffAddr += pCtx->rsi; break;
12138 case 7: u64EffAddr += pCtx->rdi; break;
12139 case 8: u64EffAddr += pCtx->r8; break;
12140 case 9: u64EffAddr += pCtx->r9; break;
12141 case 10: u64EffAddr += pCtx->r10; break;
12142 case 11: u64EffAddr += pCtx->r11; break;
12143 case 12: u64EffAddr += pCtx->r12; break;
12144 case 14: u64EffAddr += pCtx->r14; break;
12145 case 15: u64EffAddr += pCtx->r15; break;
12146 /* complicated encodings */
12147 case 5:
12148 case 13:
12149 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12150 {
12151 if (!pVCpu->iem.s.uRexB)
12152 {
12153 u64EffAddr += pCtx->rbp;
12154 SET_SS_DEF();
12155 }
12156 else
12157 u64EffAddr += pCtx->r13;
12158 }
12159 else
12160 {
12161 uint32_t u32Disp;
12162 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12163 u64EffAddr += (int32_t)u32Disp;
12164 }
12165 break;
12166 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12167 }
12168 break;
12169 }
12170 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12171 }
12172
12173 /* Get and add the displacement. */
12174 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12175 {
12176 case 0:
12177 break;
12178 case 1:
12179 {
12180 int8_t i8Disp;
12181 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12182 u64EffAddr += i8Disp;
12183 break;
12184 }
12185 case 2:
12186 {
12187 uint32_t u32Disp;
12188 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12189 u64EffAddr += (int32_t)u32Disp;
12190 break;
12191 }
12192 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12193 }
12194
12195 }
12196
12197 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12198 *pGCPtrEff = u64EffAddr;
12199 else
12200 {
12201 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12202 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12203 }
12204 }
12205
12206 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12207 return VINF_SUCCESS;
12208}
12209
12210
12211#ifdef IEM_WITH_SETJMP
12212/**
12213 * Calculates the effective address of a ModR/M memory operand.
12214 *
12215 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12216 *
12217 * May longjmp on internal error.
12218 *
12219 * @return The effective address.
12220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12221 * @param bRm The ModRM byte.
12222 * @param cbImm The size of any immediate following the
12223 * effective address opcode bytes. Important for
12224 * RIP relative addressing.
12225 */
12226IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12227{
12228 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12229 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12230# define SET_SS_DEF() \
12231 do \
12232 { \
12233 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12234 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12235 } while (0)
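    /* Stack related addressing (the BP/SP based forms) defaults to the SS segment
       unless an explicit segment prefix was decoded; that is what this macro implements. */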
12236
12237 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12238 {
12239/** @todo Check the effective address size crap! */
12240 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12241 {
12242 uint16_t u16EffAddr;
12243
12244 /* Handle the disp16 form with no registers first. */
12245 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12246 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12247 else
12248 {
 12249                /* Get the displacement. */
12250 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12251 {
12252 case 0: u16EffAddr = 0; break;
12253 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12254 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12255 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12256 }
12257
12258 /* Add the base and index registers to the disp. */
12259 switch (bRm & X86_MODRM_RM_MASK)
12260 {
12261 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12262 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12263 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12264 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12265 case 4: u16EffAddr += pCtx->si; break;
12266 case 5: u16EffAddr += pCtx->di; break;
12267 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12268 case 7: u16EffAddr += pCtx->bx; break;
12269 }
12270 }
12271
12272 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12273 return u16EffAddr;
12274 }
12275
12276 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12277 uint32_t u32EffAddr;
12278
12279 /* Handle the disp32 form with no registers first. */
12280 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12281 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12282 else
12283 {
12284 /* Get the register (or SIB) value. */
12285 switch ((bRm & X86_MODRM_RM_MASK))
12286 {
12287 case 0: u32EffAddr = pCtx->eax; break;
12288 case 1: u32EffAddr = pCtx->ecx; break;
12289 case 2: u32EffAddr = pCtx->edx; break;
12290 case 3: u32EffAddr = pCtx->ebx; break;
12291 case 4: /* SIB */
12292 {
12293 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12294
12295 /* Get the index and scale it. */
12296 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12297 {
12298 case 0: u32EffAddr = pCtx->eax; break;
12299 case 1: u32EffAddr = pCtx->ecx; break;
12300 case 2: u32EffAddr = pCtx->edx; break;
12301 case 3: u32EffAddr = pCtx->ebx; break;
12302 case 4: u32EffAddr = 0; /*none */ break;
12303 case 5: u32EffAddr = pCtx->ebp; break;
12304 case 6: u32EffAddr = pCtx->esi; break;
12305 case 7: u32EffAddr = pCtx->edi; break;
12306 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12307 }
12308 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12309
12310 /* add base */
12311 switch (bSib & X86_SIB_BASE_MASK)
12312 {
12313 case 0: u32EffAddr += pCtx->eax; break;
12314 case 1: u32EffAddr += pCtx->ecx; break;
12315 case 2: u32EffAddr += pCtx->edx; break;
12316 case 3: u32EffAddr += pCtx->ebx; break;
12317 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12318 case 5:
12319 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12320 {
12321 u32EffAddr += pCtx->ebp;
12322 SET_SS_DEF();
12323 }
12324 else
12325 {
12326 uint32_t u32Disp;
12327 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12328 u32EffAddr += u32Disp;
12329 }
12330 break;
12331 case 6: u32EffAddr += pCtx->esi; break;
12332 case 7: u32EffAddr += pCtx->edi; break;
12333 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12334 }
12335 break;
12336 }
12337 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12338 case 6: u32EffAddr = pCtx->esi; break;
12339 case 7: u32EffAddr = pCtx->edi; break;
12340 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12341 }
12342
12343 /* Get and add the displacement. */
12344 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12345 {
12346 case 0:
12347 break;
12348 case 1:
12349 {
12350 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12351 u32EffAddr += i8Disp;
12352 break;
12353 }
12354 case 2:
12355 {
12356 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12357 u32EffAddr += u32Disp;
12358 break;
12359 }
12360 default:
12361 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12362 }
12363 }
12364
12365 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12366 {
12367 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12368 return u32EffAddr;
12369 }
12370 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12371 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12372 return u32EffAddr & UINT16_MAX;
12373 }
12374
12375 uint64_t u64EffAddr;
12376
12377 /* Handle the rip+disp32 form with no registers first. */
12378 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12379 {
12380 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12381 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12382 }
12383 else
12384 {
12385 /* Get the register (or SIB) value. */
12386 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12387 {
12388 case 0: u64EffAddr = pCtx->rax; break;
12389 case 1: u64EffAddr = pCtx->rcx; break;
12390 case 2: u64EffAddr = pCtx->rdx; break;
12391 case 3: u64EffAddr = pCtx->rbx; break;
12392 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12393 case 6: u64EffAddr = pCtx->rsi; break;
12394 case 7: u64EffAddr = pCtx->rdi; break;
12395 case 8: u64EffAddr = pCtx->r8; break;
12396 case 9: u64EffAddr = pCtx->r9; break;
12397 case 10: u64EffAddr = pCtx->r10; break;
12398 case 11: u64EffAddr = pCtx->r11; break;
12399 case 13: u64EffAddr = pCtx->r13; break;
12400 case 14: u64EffAddr = pCtx->r14; break;
12401 case 15: u64EffAddr = pCtx->r15; break;
12402 /* SIB */
12403 case 4:
12404 case 12:
12405 {
12406 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12407
12408 /* Get the index and scale it. */
12409 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12410 {
12411 case 0: u64EffAddr = pCtx->rax; break;
12412 case 1: u64EffAddr = pCtx->rcx; break;
12413 case 2: u64EffAddr = pCtx->rdx; break;
12414 case 3: u64EffAddr = pCtx->rbx; break;
12415 case 4: u64EffAddr = 0; /*none */ break;
12416 case 5: u64EffAddr = pCtx->rbp; break;
12417 case 6: u64EffAddr = pCtx->rsi; break;
12418 case 7: u64EffAddr = pCtx->rdi; break;
12419 case 8: u64EffAddr = pCtx->r8; break;
12420 case 9: u64EffAddr = pCtx->r9; break;
12421 case 10: u64EffAddr = pCtx->r10; break;
12422 case 11: u64EffAddr = pCtx->r11; break;
12423 case 12: u64EffAddr = pCtx->r12; break;
12424 case 13: u64EffAddr = pCtx->r13; break;
12425 case 14: u64EffAddr = pCtx->r14; break;
12426 case 15: u64EffAddr = pCtx->r15; break;
12427 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12428 }
12429 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12430
12431 /* add base */
12432 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12433 {
12434 case 0: u64EffAddr += pCtx->rax; break;
12435 case 1: u64EffAddr += pCtx->rcx; break;
12436 case 2: u64EffAddr += pCtx->rdx; break;
12437 case 3: u64EffAddr += pCtx->rbx; break;
12438 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12439 case 6: u64EffAddr += pCtx->rsi; break;
12440 case 7: u64EffAddr += pCtx->rdi; break;
12441 case 8: u64EffAddr += pCtx->r8; break;
12442 case 9: u64EffAddr += pCtx->r9; break;
12443 case 10: u64EffAddr += pCtx->r10; break;
12444 case 11: u64EffAddr += pCtx->r11; break;
12445 case 12: u64EffAddr += pCtx->r12; break;
12446 case 14: u64EffAddr += pCtx->r14; break;
12447 case 15: u64EffAddr += pCtx->r15; break;
12448 /* complicated encodings */
12449 case 5:
12450 case 13:
12451 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12452 {
12453 if (!pVCpu->iem.s.uRexB)
12454 {
12455 u64EffAddr += pCtx->rbp;
12456 SET_SS_DEF();
12457 }
12458 else
12459 u64EffAddr += pCtx->r13;
12460 }
12461 else
12462 {
12463 uint32_t u32Disp;
12464 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12465 u64EffAddr += (int32_t)u32Disp;
12466 }
12467 break;
12468 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12469 }
12470 break;
12471 }
12472 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12473 }
12474
12475 /* Get and add the displacement. */
12476 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12477 {
12478 case 0:
12479 break;
12480 case 1:
12481 {
12482 int8_t i8Disp;
12483 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12484 u64EffAddr += i8Disp;
12485 break;
12486 }
12487 case 2:
12488 {
12489 uint32_t u32Disp;
12490 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12491 u64EffAddr += (int32_t)u32Disp;
12492 break;
12493 }
12494 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12495 }
12496
12497 }
12498
12499 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12500 {
12501 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12502 return u64EffAddr;
12503 }
12504 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12505 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12506 return u64EffAddr & UINT32_MAX;
12507}
12508#endif /* IEM_WITH_SETJMP */
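
/*
 * Illustrative sketch (not part of IEM): for a 32-bit SIB-addressed operand the
 * arithmetic implemented by the two helpers above boils down to
 *      EffAddr = Base + (Index << Scale) + Disp.
 * The stand-alone function below shows that calculation for a raw SIB byte and a
 * pre-fetched displacement.  The function name and the plain register array are
 * made up for the example, and the ESP/EBP special cases and segment defaulting
 * handled by the real code are deliberately ignored.
 */
#if 0 /* example only, not built */
static uint32_t iemExampleCalcSibEffAddr32(uint8_t bSib, uint32_t const *pau32Regs, uint32_t u32Disp)
{
    uint32_t const iIndex = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
    uint32_t const iBase  = bSib & X86_SIB_BASE_MASK;
    uint32_t const cShift = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    uint32_t const uIndex = iIndex != 4 ? pau32Regs[iIndex] : 0; /* index 100b means 'no index' */
    return pau32Regs[iBase] + (uIndex << cShift) + u32Disp;
}
#endif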
12509
12510
12511/** @} */
12512
12513
12514
12515/*
12516 * Include the instructions
12517 */
12518#include "IEMAllInstructions.cpp.h"
12519
12520
12521
12522
12523#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12524
12525/**
12526 * Sets up execution verification mode.
12527 */
12528IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12529{
12530
12531 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12532
12533 /*
12534 * Always note down the address of the current instruction.
12535 */
12536 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12537 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12538
12539 /*
12540 * Enable verification and/or logging.
12541 */
 12542    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12543 if ( fNewNoRem
12544 && ( 0
12545#if 0 /* auto enable on first paged protected mode interrupt */
12546 || ( pOrgCtx->eflags.Bits.u1IF
12547 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12548 && TRPMHasTrap(pVCpu)
12549 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12550#endif
12551#if 0
 12552                  || (   pOrgCtx->cs.Sel == 0x10
 12553                      && (   pOrgCtx->rip == 0x90119e3e
 12554                          || pOrgCtx->rip == 0x901d9810))
12555#endif
12556#if 0 /* Auto enable DSL - FPU stuff. */
 12557                  || (   pOrgCtx->cs.Sel == 0x10
12558 && (// pOrgCtx->rip == 0xc02ec07f
12559 //|| pOrgCtx->rip == 0xc02ec082
12560 //|| pOrgCtx->rip == 0xc02ec0c9
12561 0
12562 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12563#endif
12564#if 0 /* Auto enable DSL - fstp st0 stuff. */
 12565                  || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12566#endif
12567#if 0
12568 || pOrgCtx->rip == 0x9022bb3a
12569#endif
12570#if 0
12571 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12572#endif
12573#if 0
12574 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12575 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12576#endif
12577#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
12578 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12579 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12580 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12581#endif
12582#if 0 /* NT4SP1 - xadd early boot. */
12583 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12584#endif
12585#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12586 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12587#endif
12588#if 0 /* NT4SP1 - cmpxchg (AMD). */
12589 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12590#endif
12591#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12592 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12593#endif
12594#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12595 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12596
12597#endif
12598#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12599 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12600
12601#endif
12602#if 0 /* NT4SP1 - frstor [ecx] */
12603 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12604#endif
12605#if 0 /* xxxxxx - All long mode code. */
12606 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12607#endif
12608#if 0 /* rep movsq linux 3.7 64-bit boot. */
12609 || (pOrgCtx->rip == 0x0000000000100241)
12610#endif
12611#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12612 || (pOrgCtx->rip == 0x000000000215e240)
12613#endif
12614#if 0 /* DOS's size-overridden iret to v8086. */
12615 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12616#endif
12617 )
12618 )
12619 {
12620 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12621 RTLogFlags(NULL, "enabled");
12622 fNewNoRem = false;
12623 }
12624 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12625 {
12626 pVCpu->iem.s.fNoRem = fNewNoRem;
12627 if (!fNewNoRem)
12628 {
12629 LogAlways(("Enabling verification mode!\n"));
12630 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12631 }
12632 else
12633 LogAlways(("Disabling verification mode!\n"));
12634 }
12635
12636 /*
12637 * Switch state.
12638 */
12639 if (IEM_VERIFICATION_ENABLED(pVCpu))
12640 {
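        /* Run IEM on a private copy of the guest context so the original context can
           afterwards be executed by REM/HM and the two results compared by
           iemExecVerificationModeCheck. */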
12641 static CPUMCTX s_DebugCtx; /* Ugly! */
12642
12643 s_DebugCtx = *pOrgCtx;
12644 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12645 }
12646
12647 /*
12648 * See if there is an interrupt pending in TRPM and inject it if we can.
12649 */
12650 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12651 if ( pOrgCtx->eflags.Bits.u1IF
12652 && TRPMHasTrap(pVCpu)
12653 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12654 {
12655 uint8_t u8TrapNo;
12656 TRPMEVENT enmType;
12657 RTGCUINT uErrCode;
12658 RTGCPTR uCr2;
12659 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12660 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12661 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12662 TRPMResetTrap(pVCpu);
12663 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12664 }
12665
12666 /*
12667 * Reset the counters.
12668 */
12669 pVCpu->iem.s.cIOReads = 0;
12670 pVCpu->iem.s.cIOWrites = 0;
12671 pVCpu->iem.s.fIgnoreRaxRdx = false;
12672 pVCpu->iem.s.fOverlappingMovs = false;
12673 pVCpu->iem.s.fProblematicMemory = false;
12674 pVCpu->iem.s.fUndefinedEFlags = 0;
12675
12676 if (IEM_VERIFICATION_ENABLED(pVCpu))
12677 {
12678 /*
12679 * Free all verification records.
12680 */
12681 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12682 pVCpu->iem.s.pIemEvtRecHead = NULL;
12683 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12684 do
12685 {
12686 while (pEvtRec)
12687 {
12688 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12689 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12690 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12691 pEvtRec = pNext;
12692 }
12693 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12694 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12695 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12696 } while (pEvtRec);
12697 }
12698}
12699
12700
12701/**
12702 * Allocate an event record.
12703 * @returns Pointer to a record.
12704 */
12705IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12706{
12707 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12708 return NULL;
12709
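    /* Reuse a record from the free list when possible, otherwise allocate a fresh
       one from the MM heap. */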
12710 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12711 if (pEvtRec)
12712 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12713 else
12714 {
12715 if (!pVCpu->iem.s.ppIemEvtRecNext)
12716 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12717
12718 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12719 if (!pEvtRec)
12720 return NULL;
12721 }
12722 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12723 pEvtRec->pNext = NULL;
12724 return pEvtRec;
12725}
12726
12727
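/*
 * The IEMNotify* hooks below are invoked when the other execution engine (REM or HM)
 * performs I/O, MMIO or string I/O.  They append records to the 'other' event list so
 * that iemExecVerificationModeCheck can match them 1:1 against the records IEM itself
 * produced for the same instruction.
 */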
12728/**
12729 * IOMMMIORead notification.
12730 */
12731VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12732{
12733 PVMCPU pVCpu = VMMGetCpu(pVM);
12734 if (!pVCpu)
12735 return;
12736 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12737 if (!pEvtRec)
12738 return;
12739 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12740 pEvtRec->u.RamRead.GCPhys = GCPhys;
12741 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12742 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12743 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12744}
12745
12746
12747/**
12748 * IOMMMIOWrite notification.
12749 */
12750VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12751{
12752 PVMCPU pVCpu = VMMGetCpu(pVM);
12753 if (!pVCpu)
12754 return;
12755 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12756 if (!pEvtRec)
12757 return;
12758 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12759 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12760 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12761 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12762 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12763 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12764 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12765 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12766 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12767}
12768
12769
12770/**
12771 * IOMIOPortRead notification.
12772 */
12773VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12774{
12775 PVMCPU pVCpu = VMMGetCpu(pVM);
12776 if (!pVCpu)
12777 return;
12778 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12779 if (!pEvtRec)
12780 return;
12781 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12782 pEvtRec->u.IOPortRead.Port = Port;
12783 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12784 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12785 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12786}
12787
12788/**
12789 * IOMIOPortWrite notification.
12790 */
12791VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12792{
12793 PVMCPU pVCpu = VMMGetCpu(pVM);
12794 if (!pVCpu)
12795 return;
12796 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12797 if (!pEvtRec)
12798 return;
12799 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12800 pEvtRec->u.IOPortWrite.Port = Port;
12801 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12802 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12803 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12804 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12805}
12806
12807
12808VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12809{
12810 PVMCPU pVCpu = VMMGetCpu(pVM);
12811 if (!pVCpu)
12812 return;
12813 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12814 if (!pEvtRec)
12815 return;
12816 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12817 pEvtRec->u.IOPortStrRead.Port = Port;
12818 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12819 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12820 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12821 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12822}
12823
12824
12825VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12826{
12827 PVMCPU pVCpu = VMMGetCpu(pVM);
12828 if (!pVCpu)
12829 return;
12830 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12831 if (!pEvtRec)
12832 return;
12833 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12834 pEvtRec->u.IOPortStrWrite.Port = Port;
12835 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12836 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12837 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12838 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12839}
12840
12841
12842/**
12843 * Fakes and records an I/O port read.
12844 *
12845 * @returns VINF_SUCCESS.
12846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12847 * @param Port The I/O port.
12848 * @param pu32Value Where to store the fake value.
12849 * @param cbValue The size of the access.
12850 */
12851IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12852{
12853 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12854 if (pEvtRec)
12855 {
12856 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12857 pEvtRec->u.IOPortRead.Port = Port;
12858 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12859 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12860 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12861 }
12862 pVCpu->iem.s.cIOReads++;
12863 *pu32Value = 0xcccccccc;
12864 return VINF_SUCCESS;
12865}
12866
12867
12868/**
12869 * Fakes and records an I/O port write.
12870 *
12871 * @returns VINF_SUCCESS.
12872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12873 * @param Port The I/O port.
12874 * @param u32Value The value being written.
12875 * @param cbValue The size of the access.
12876 */
12877IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12878{
12879 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12880 if (pEvtRec)
12881 {
12882 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12883 pEvtRec->u.IOPortWrite.Port = Port;
12884 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12885 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12886 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12887 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12888 }
12889 pVCpu->iem.s.cIOWrites++;
12890 return VINF_SUCCESS;
12891}
12892
12893
12894/**
12895 * Used to add extra details about a stub case.
12896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12897 */
12898IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12899{
12900 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12901 PVM pVM = pVCpu->CTX_SUFF(pVM);
12902
12903 char szRegs[4096];
12904 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12905 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12906 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12907 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12908 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12909 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12910 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12911 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12912 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12913 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12914 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12915 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12916 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12917 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12918 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12919 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12920 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12921 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12922 " efer=%016VR{efer}\n"
12923 " pat=%016VR{pat}\n"
12924 " sf_mask=%016VR{sf_mask}\n"
12925 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12926 " lstar=%016VR{lstar}\n"
12927 " star=%016VR{star} cstar=%016VR{cstar}\n"
12928 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12929 );
12930
12931 char szInstr1[256];
12932 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12933 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12934 szInstr1, sizeof(szInstr1), NULL);
12935 char szInstr2[256];
12936 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12937 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12938 szInstr2, sizeof(szInstr2), NULL);
12939
12940 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12941}
12942
12943
12944/**
12945 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12946 * dump to the assertion info.
12947 *
12948 * @param pEvtRec The record to dump.
12949 */
12950IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12951{
12952 switch (pEvtRec->enmEvent)
12953 {
12954 case IEMVERIFYEVENT_IOPORT_READ:
12955 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
 12956                             pEvtRec->u.IOPortRead.Port,
 12957                             pEvtRec->u.IOPortRead.cbValue);
12958 break;
12959 case IEMVERIFYEVENT_IOPORT_WRITE:
12960 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12961 pEvtRec->u.IOPortWrite.Port,
12962 pEvtRec->u.IOPortWrite.cbValue,
12963 pEvtRec->u.IOPortWrite.u32Value);
12964 break;
12965 case IEMVERIFYEVENT_IOPORT_STR_READ:
12966 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
 12967                             pEvtRec->u.IOPortStrRead.Port,
 12968                             pEvtRec->u.IOPortStrRead.cbValue,
 12969                             pEvtRec->u.IOPortStrRead.cTransfers);
12970 break;
12971 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12972 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12973 pEvtRec->u.IOPortStrWrite.Port,
12974 pEvtRec->u.IOPortStrWrite.cbValue,
12975 pEvtRec->u.IOPortStrWrite.cTransfers);
12976 break;
12977 case IEMVERIFYEVENT_RAM_READ:
12978 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
12979 pEvtRec->u.RamRead.GCPhys,
12980 pEvtRec->u.RamRead.cb);
12981 break;
12982 case IEMVERIFYEVENT_RAM_WRITE:
12983 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
12984 pEvtRec->u.RamWrite.GCPhys,
12985 pEvtRec->u.RamWrite.cb,
12986 (int)pEvtRec->u.RamWrite.cb,
12987 pEvtRec->u.RamWrite.ab);
12988 break;
12989 default:
12990 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
12991 break;
12992 }
12993}
12994
12995
12996/**
 12997 * Raises an assertion on the specified records, showing the given message with
 12998 * dumps of both records attached.
12999 *
13000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13001 * @param pEvtRec1 The first record.
13002 * @param pEvtRec2 The second record.
13003 * @param pszMsg The message explaining why we're asserting.
13004 */
13005IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13006{
13007 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13008 iemVerifyAssertAddRecordDump(pEvtRec1);
13009 iemVerifyAssertAddRecordDump(pEvtRec2);
13010 iemVerifyAssertMsg2(pVCpu);
13011 RTAssertPanic();
13012}
13013
13014
13015/**
13016 * Raises an assertion on the specified record, showing the given message with
13017 * a record dump attached.
13018 *
13019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 13020 * @param   pEvtRec     The record to dump.
13021 * @param pszMsg The message explaining why we're asserting.
13022 */
13023IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13024{
13025 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13026 iemVerifyAssertAddRecordDump(pEvtRec);
13027 iemVerifyAssertMsg2(pVCpu);
13028 RTAssertPanic();
13029}
13030
13031
13032/**
13033 * Verifies a write record.
13034 *
13035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13036 * @param pEvtRec The write record.
 13037 * @param   fRem        Set if REM was doing the other execution.  If
 13038 *                      clear, it was HM.
13039 */
13040IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13041{
13042 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13043 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13044 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13045 if ( RT_FAILURE(rc)
13046 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13047 {
13048 /* fend off ins */
13049 if ( !pVCpu->iem.s.cIOReads
13050 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13051 || ( pEvtRec->u.RamWrite.cb != 1
13052 && pEvtRec->u.RamWrite.cb != 2
13053 && pEvtRec->u.RamWrite.cb != 4) )
13054 {
13055 /* fend off ROMs and MMIO */
13056 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13057 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13058 {
13059 /* fend off fxsave */
13060 if (pEvtRec->u.RamWrite.cb != 512)
13061 {
13062 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13063 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13064 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13065 RTAssertMsg2Add("%s: %.*Rhxs\n"
13066 "iem: %.*Rhxs\n",
13067 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13068 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13069 iemVerifyAssertAddRecordDump(pEvtRec);
13070 iemVerifyAssertMsg2(pVCpu);
13071 RTAssertPanic();
13072 }
13073 }
13074 }
13075 }
13076
13077}
13078
13079/**
 13080 * Performs the post-execution verification checks.
13081 */
13082IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13083{
13084 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13085 return rcStrictIem;
13086
13087 /*
13088 * Switch back the state.
13089 */
13090 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13091 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13092 Assert(pOrgCtx != pDebugCtx);
13093 IEM_GET_CTX(pVCpu) = pOrgCtx;
13094
13095 /*
13096 * Execute the instruction in REM.
13097 */
13098 bool fRem = false;
13099 PVM pVM = pVCpu->CTX_SUFF(pVM);
13100
13101 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13102#ifdef IEM_VERIFICATION_MODE_FULL_HM
13103 if ( HMIsEnabled(pVM)
13104 && pVCpu->iem.s.cIOReads == 0
13105 && pVCpu->iem.s.cIOWrites == 0
13106 && !pVCpu->iem.s.fProblematicMemory)
13107 {
13108 uint64_t uStartRip = pOrgCtx->rip;
13109 unsigned iLoops = 0;
13110 do
13111 {
13112 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13113 iLoops++;
13114 } while ( rc == VINF_SUCCESS
13115 || ( rc == VINF_EM_DBG_STEPPED
13116 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13117 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13118 || ( pOrgCtx->rip != pDebugCtx->rip
13119 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13120 && iLoops < 8) );
13121 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13122 rc = VINF_SUCCESS;
13123 }
13124#endif
13125 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13126 || rc == VINF_IOM_R3_IOPORT_READ
13127 || rc == VINF_IOM_R3_IOPORT_WRITE
13128 || rc == VINF_IOM_R3_MMIO_READ
13129 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13130 || rc == VINF_IOM_R3_MMIO_WRITE
13131 || rc == VINF_CPUM_R3_MSR_READ
13132 || rc == VINF_CPUM_R3_MSR_WRITE
13133 || rc == VINF_EM_RESCHEDULE
13134 )
13135 {
13136 EMRemLock(pVM);
13137 rc = REMR3EmulateInstruction(pVM, pVCpu);
13138 AssertRC(rc);
13139 EMRemUnlock(pVM);
13140 fRem = true;
13141 }
13142
13143# if 1 /* Skip unimplemented instructions for now. */
13144 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13145 {
13146 IEM_GET_CTX(pVCpu) = pOrgCtx;
13147 if (rc == VINF_EM_DBG_STEPPED)
13148 return VINF_SUCCESS;
13149 return rc;
13150 }
13151# endif
13152
13153 /*
13154 * Compare the register states.
13155 */
13156 unsigned cDiffs = 0;
13157 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13158 {
13159 //Log(("REM and IEM ends up with different registers!\n"));
13160 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13161
13162# define CHECK_FIELD(a_Field) \
13163 do \
13164 { \
13165 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13166 { \
13167 switch (sizeof(pOrgCtx->a_Field)) \
13168 { \
13169 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13170 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13171 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13172 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13173 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13174 } \
13175 cDiffs++; \
13176 } \
13177 } while (0)
13178# define CHECK_XSTATE_FIELD(a_Field) \
13179 do \
13180 { \
13181 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13182 { \
13183 switch (sizeof(pOrgXState->a_Field)) \
13184 { \
13185 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13186 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13187 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13188 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13189 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13190 } \
13191 cDiffs++; \
13192 } \
13193 } while (0)
13194
13195# define CHECK_BIT_FIELD(a_Field) \
13196 do \
13197 { \
13198 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13199 { \
13200 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13201 cDiffs++; \
13202 } \
13203 } while (0)
13204
13205# define CHECK_SEL(a_Sel) \
13206 do \
13207 { \
13208 CHECK_FIELD(a_Sel.Sel); \
13209 CHECK_FIELD(a_Sel.Attr.u); \
13210 CHECK_FIELD(a_Sel.u64Base); \
13211 CHECK_FIELD(a_Sel.u32Limit); \
13212 CHECK_FIELD(a_Sel.fFlags); \
13213 } while (0)
13214
13215 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13216 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13217
13218#if 1 /* The recompiler doesn't update these the intel way. */
13219 if (fRem)
13220 {
13221 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13222 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13223 pOrgXState->x87.CS = pDebugXState->x87.CS;
13224 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13225 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13226 pOrgXState->x87.DS = pDebugXState->x87.DS;
13227 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13228 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13229 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13230 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13231 }
13232#endif
13233 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13234 {
13235 RTAssertMsg2Weak(" the FPU state differs\n");
13236 cDiffs++;
13237 CHECK_XSTATE_FIELD(x87.FCW);
13238 CHECK_XSTATE_FIELD(x87.FSW);
13239 CHECK_XSTATE_FIELD(x87.FTW);
13240 CHECK_XSTATE_FIELD(x87.FOP);
13241 CHECK_XSTATE_FIELD(x87.FPUIP);
13242 CHECK_XSTATE_FIELD(x87.CS);
13243 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13244 CHECK_XSTATE_FIELD(x87.FPUDP);
13245 CHECK_XSTATE_FIELD(x87.DS);
13246 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13247 CHECK_XSTATE_FIELD(x87.MXCSR);
13248 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13249 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13250 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13251 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13252 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13253 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13254 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13255 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13256 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13257 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13258 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13259 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13260 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13261 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13262 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13263 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13264 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13265 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13266 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13267 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13268 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13269 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13270 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13271 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13272 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13273 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13274 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13275 }
13276 CHECK_FIELD(rip);
13277 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13278 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13279 {
13280 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13281 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13282 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13283 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13284 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13285 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13286 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13287 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13288 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13289 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13290 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13291 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13292 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13293 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13294 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13295 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
 13296                if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
13297 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13298 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13299 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13300 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13301 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13302 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13303 }
13304
13305 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13306 CHECK_FIELD(rax);
13307 CHECK_FIELD(rcx);
13308 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13309 CHECK_FIELD(rdx);
13310 CHECK_FIELD(rbx);
13311 CHECK_FIELD(rsp);
13312 CHECK_FIELD(rbp);
13313 CHECK_FIELD(rsi);
13314 CHECK_FIELD(rdi);
13315 CHECK_FIELD(r8);
13316 CHECK_FIELD(r9);
13317 CHECK_FIELD(r10);
13318 CHECK_FIELD(r11);
13319 CHECK_FIELD(r12);
13320 CHECK_FIELD(r13);
13321 CHECK_SEL(cs);
13322 CHECK_SEL(ss);
13323 CHECK_SEL(ds);
13324 CHECK_SEL(es);
13325 CHECK_SEL(fs);
13326 CHECK_SEL(gs);
13327 CHECK_FIELD(cr0);
13328
 13329        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
 13330           the faulting instruction first: 001b:77f61ff3 66 8b 42 02  mov ax, word [edx+002h] (NT4SP1) */
 13331        /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
 13332           while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
13333 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13334 {
13335 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13336 { /* ignore */ }
13337 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13338 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13339 && fRem)
13340 { /* ignore */ }
13341 else
13342 CHECK_FIELD(cr2);
13343 }
13344 CHECK_FIELD(cr3);
13345 CHECK_FIELD(cr4);
13346 CHECK_FIELD(dr[0]);
13347 CHECK_FIELD(dr[1]);
13348 CHECK_FIELD(dr[2]);
13349 CHECK_FIELD(dr[3]);
13350 CHECK_FIELD(dr[6]);
13351 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13352 CHECK_FIELD(dr[7]);
13353 CHECK_FIELD(gdtr.cbGdt);
13354 CHECK_FIELD(gdtr.pGdt);
13355 CHECK_FIELD(idtr.cbIdt);
13356 CHECK_FIELD(idtr.pIdt);
13357 CHECK_SEL(ldtr);
13358 CHECK_SEL(tr);
13359 CHECK_FIELD(SysEnter.cs);
13360 CHECK_FIELD(SysEnter.eip);
13361 CHECK_FIELD(SysEnter.esp);
13362 CHECK_FIELD(msrEFER);
13363 CHECK_FIELD(msrSTAR);
13364 CHECK_FIELD(msrPAT);
13365 CHECK_FIELD(msrLSTAR);
13366 CHECK_FIELD(msrCSTAR);
13367 CHECK_FIELD(msrSFMASK);
13368 CHECK_FIELD(msrKERNELGSBASE);
13369
13370 if (cDiffs != 0)
13371 {
13372 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13373 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13374 RTAssertPanic();
13375 static bool volatile s_fEnterDebugger = true;
13376 if (s_fEnterDebugger)
13377 DBGFSTOP(pVM);
13378
13379# if 1 /* Ignore unimplemented instructions for now. */
13380 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13381 rcStrictIem = VINF_SUCCESS;
13382# endif
13383 }
13384# undef CHECK_FIELD
13385# undef CHECK_BIT_FIELD
13386 }
13387
13388 /*
13389 * If the register state compared fine, check the verification event
13390 * records.
13391 */
13392 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13393 {
13394 /*
 13395         * Compare verification event records.
13396 * - I/O port accesses should be a 1:1 match.
13397 */
13398 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13399 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13400 while (pIemRec && pOtherRec)
13401 {
 13402            /* Since we might miss RAM writes and reads, ignore reads and check
 13403               that any extra written memory matches what is actually in guest RAM. */
13404 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13405 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13406 && pIemRec->pNext)
13407 {
13408 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13409 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13410 pIemRec = pIemRec->pNext;
13411 }
13412
13413 /* Do the compare. */
13414 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13415 {
13416 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13417 break;
13418 }
13419 bool fEquals;
13420 switch (pIemRec->enmEvent)
13421 {
13422 case IEMVERIFYEVENT_IOPORT_READ:
13423 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13424 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13425 break;
13426 case IEMVERIFYEVENT_IOPORT_WRITE:
13427 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13428 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13429 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13430 break;
13431 case IEMVERIFYEVENT_IOPORT_STR_READ:
13432 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13433 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13434 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13435 break;
13436 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13437 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13438 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13439 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13440 break;
13441 case IEMVERIFYEVENT_RAM_READ:
13442 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13443 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13444 break;
13445 case IEMVERIFYEVENT_RAM_WRITE:
13446 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13447 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13448 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13449 break;
13450 default:
13451 fEquals = false;
13452 break;
13453 }
13454 if (!fEquals)
13455 {
13456 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13457 break;
13458 }
13459
13460 /* advance */
13461 pIemRec = pIemRec->pNext;
13462 pOtherRec = pOtherRec->pNext;
13463 }
13464
13465 /* Ignore extra writes and reads. */
13466 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13467 {
13468 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13469 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13470 pIemRec = pIemRec->pNext;
13471 }
13472 if (pIemRec != NULL)
13473 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13474 else if (pOtherRec != NULL)
13475 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13476 }
13477 IEM_GET_CTX(pVCpu) = pOrgCtx;
13478
13479 return rcStrictIem;
13480}
13481
13482#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13483
13484/* stubs */
13485IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13486{
13487 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13488 return VERR_INTERNAL_ERROR;
13489}
13490
13491IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13492{
13493 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13494 return VERR_INTERNAL_ERROR;
13495}
13496
13497#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13498
13499
13500#ifdef LOG_ENABLED
13501/**
13502 * Logs the current instruction.
13503 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13504 * @param pCtx The current CPU context.
13505 * @param fSameCtx Set if we have the same context information as the VMM,
13506 * clear if we may have already executed an instruction in
13507 * our debug context. When clear, we assume IEMCPU holds
13508 * valid CPU mode info.
13509 */
13510IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13511{
13512# ifdef IN_RING3
13513 if (LogIs2Enabled())
13514 {
13515 char szInstr[256];
13516 uint32_t cbInstr = 0;
13517 if (fSameCtx)
13518 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13519 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13520 szInstr, sizeof(szInstr), &cbInstr);
13521 else
13522 {
13523 uint32_t fFlags = 0;
13524 switch (pVCpu->iem.s.enmCpuMode)
13525 {
13526 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13527 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13528 case IEMMODE_16BIT:
13529 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13530 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13531 else
13532 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13533 break;
13534 }
13535 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13536 szInstr, sizeof(szInstr), &cbInstr);
13537 }
13538
13539 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13540 Log2(("****\n"
13541 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13542 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13543 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13544 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13545 " %s\n"
13546 ,
13547 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13548 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13549 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13550 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13551 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13552 szInstr));
13553
13554 if (LogIs3Enabled())
13555 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13556 }
13557 else
13558# endif
13559 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13560 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13561 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13562}
13563#endif
13564
13565
13566/**
 13567 * Makes status code adjustments (pass up from I/O and access handlers)
 13568 * as well as maintaining statistics.
13569 *
13570 * @returns Strict VBox status code to pass up.
13571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13572 * @param rcStrict The status from executing an instruction.
13573 */
13574DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13575{
13576 if (rcStrict != VINF_SUCCESS)
13577 {
13578 if (RT_SUCCESS(rcStrict))
13579 {
13580 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13581 || rcStrict == VINF_IOM_R3_IOPORT_READ
13582 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13583 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13584 || rcStrict == VINF_IOM_R3_MMIO_READ
13585 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13586 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13587 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13588 || rcStrict == VINF_CPUM_R3_MSR_READ
13589 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13590 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13591 || rcStrict == VINF_EM_RAW_TO_R3
13592 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13593 /* raw-mode / virt handlers only: */
13594 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13595 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13596 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13597 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13598 || rcStrict == VINF_SELM_SYNC_GDT
13599 || rcStrict == VINF_CSAM_PENDING_ACTION
13600 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13601 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13602/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13603 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
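            /* A pending pass-up status replaces the current informational status when it
               lies outside the VINF_EM range or ranks higher (numerically lower) than it. */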
13604 if (rcPassUp == VINF_SUCCESS)
13605 pVCpu->iem.s.cRetInfStatuses++;
13606 else if ( rcPassUp < VINF_EM_FIRST
13607 || rcPassUp > VINF_EM_LAST
13608 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13609 {
13610 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13611 pVCpu->iem.s.cRetPassUpStatus++;
13612 rcStrict = rcPassUp;
13613 }
13614 else
13615 {
13616 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13617 pVCpu->iem.s.cRetInfStatuses++;
13618 }
13619 }
13620 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13621 pVCpu->iem.s.cRetAspectNotImplemented++;
13622 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13623 pVCpu->iem.s.cRetInstrNotImplemented++;
13624#ifdef IEM_VERIFICATION_MODE_FULL
13625 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13626 rcStrict = VINF_SUCCESS;
13627#endif
13628 else
13629 pVCpu->iem.s.cRetErrStatuses++;
13630 }
13631 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13632 {
13633 pVCpu->iem.s.cRetPassUpStatus++;
13634 rcStrict = pVCpu->iem.s.rcPassUp;
13635 }
13636
13637 return rcStrict;
13638}
13639
13640
13641/**
13642 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13643 * IEMExecOneWithPrefetchedByPC.
13644 *
13645 * Similar code is found in IEMExecLots.
13646 *
13647 * @return Strict VBox status code.
13648 * @param pVCpu The cross context virtual CPU structure of the
13649 * calling EMT.
13650 * @param fExecuteInhibit If set, execute the instruction following CLI,
13651 * POP SS and MOV SS,GR.
13652 */
13653DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13654{
13655#ifdef IEM_WITH_SETJMP
13656 VBOXSTRICTRC rcStrict;
13657 jmp_buf JmpBuf;
13658 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13659 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13660 if ((rcStrict = setjmp(JmpBuf)) == 0)
13661 {
13662 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13663 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13664 }
13665 else
13666 pVCpu->iem.s.cLongJumps++;
13667 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13668#else
13669 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13670 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13671#endif
13672 if (rcStrict == VINF_SUCCESS)
13673 pVCpu->iem.s.cInstructions++;
13674 if (pVCpu->iem.s.cActiveMappings > 0)
13675 {
13676 Assert(rcStrict != VINF_SUCCESS);
13677 iemMemRollback(pVCpu);
13678 }
13679//#ifdef DEBUG
13680// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13681//#endif
13682
13683 /* Execute the next instruction as well if a cli, pop ss or
13684 mov ss, Gr has just completed successfully. */
13685 if ( fExecuteInhibit
13686 && rcStrict == VINF_SUCCESS
13687 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13688 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13689 {
13690 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13691 if (rcStrict == VINF_SUCCESS)
13692 {
13693#ifdef LOG_ENABLED
13694 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13695#endif
13696#ifdef IEM_WITH_SETJMP
13697 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13698 if ((rcStrict = setjmp(JmpBuf)) == 0)
13699 {
13700 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13701 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13702 }
13703 else
13704 pVCpu->iem.s.cLongJumps++;
13705 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13706#else
13707 IEM_OPCODE_GET_NEXT_U8(&b);
13708 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13709#endif
13710 if (rcStrict == VINF_SUCCESS)
13711 pVCpu->iem.s.cInstructions++;
13712 if (pVCpu->iem.s.cActiveMappings > 0)
13713 {
13714 Assert(rcStrict != VINF_SUCCESS);
13715 iemMemRollback(pVCpu);
13716 }
13717 }
13718 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13719 }
13720
13721 /*
13722 * Return value fiddling, statistics and sanity assertions.
13723 */
13724 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13725
13726 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13727 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13728#if defined(IEM_VERIFICATION_MODE_FULL)
13729 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13730 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13731 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13732 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13733#endif
13734 return rcStrict;
13735}
13736
13737
13738#ifdef IN_RC
13739/**
13740 * Re-enters raw-mode or ensures we return to ring-3.
13741 *
13742 * @returns rcStrict, maybe modified.
13743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13744 * @param pCtx The current CPU context.
13745 * @param rcStrict The status code returned by the interpreter.
13746 */
13747DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13748{
13749 if ( !pVCpu->iem.s.fInPatchCode
13750 && ( rcStrict == VINF_SUCCESS
13751 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13752 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13753 {
13754 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13755 CPUMRawEnter(pVCpu);
13756 else
13757 {
13758 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13759 rcStrict = VINF_EM_RESCHEDULE;
13760 }
13761 }
13762 return rcStrict;
13763}
13764#endif
13765
13766
13767/**
13768 * Execute one instruction.
13769 *
13770 * @return Strict VBox status code.
13771 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13772 */
13773VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13774{
13775#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13776 if (++pVCpu->iem.s.cVerifyDepth == 1)
13777 iemExecVerificationModeSetup(pVCpu);
13778#endif
13779#ifdef LOG_ENABLED
13780 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13781 iemLogCurInstr(pVCpu, pCtx, true);
13782#endif
13783
13784 /*
13785 * Do the decoding and emulation.
13786 */
13787 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13788 if (rcStrict == VINF_SUCCESS)
13789 rcStrict = iemExecOneInner(pVCpu, true);
13790
13791#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13792 /*
13793 * Assert some sanity.
13794 */
13795 if (pVCpu->iem.s.cVerifyDepth == 1)
13796 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13797 pVCpu->iem.s.cVerifyDepth--;
13798#endif
13799#ifdef IN_RC
13800 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13801#endif
13802 if (rcStrict != VINF_SUCCESS)
13803 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13804 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13805 return rcStrict;
13806}
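
/* Editor's note (not part of the original source): a hypothetical caller
 * sketch, assuming the usual VMM headers (VBox/vmm/iem.h, VBox/err.h) and
 * that it runs on the EMT owning pVCpu.  It single-steps the guest until
 * IEMExecOne returns anything other than VINF_SUCCESS. */
#if 0 /* illustration only */
static VBOXSTRICTRC sketchSingleStepLoop(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstructions-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* decode and execute exactly one instruction */
        if (rcStrict != VINF_SUCCESS)
            break;                      /* informational or error status: let the caller decide */
    }
    return rcStrict;
}
#endif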
13807
13808
13809VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13810{
13811 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13812 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13813
13814 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13815 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13816 if (rcStrict == VINF_SUCCESS)
13817 {
13818 rcStrict = iemExecOneInner(pVCpu, true);
13819 if (pcbWritten)
13820 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13821 }
13822
13823#ifdef IN_RC
13824 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13825#endif
13826 return rcStrict;
13827}
13828
13829
13830VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13831 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13832{
13833 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13834 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13835
13836 VBOXSTRICTRC rcStrict;
13837 if ( cbOpcodeBytes
13838 && pCtx->rip == OpcodeBytesPC)
13839 {
13840 iemInitDecoder(pVCpu, false);
13841#ifdef IEM_WITH_CODE_TLB
13842 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13843 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13844 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13845 pVCpu->iem.s.offCurInstrStart = 0;
13846 pVCpu->iem.s.offInstrNextByte = 0;
13847#else
13848 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13849 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13850#endif
13851 rcStrict = VINF_SUCCESS;
13852 }
13853 else
13854 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13855 if (rcStrict == VINF_SUCCESS)
13856 {
13857 rcStrict = iemExecOneInner(pVCpu, true);
13858 }
13859
13860#ifdef IN_RC
13861 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13862#endif
13863 return rcStrict;
13864}
13865
13866
13867VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13868{
13869 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13870 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13871
13872 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13873 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13874 if (rcStrict == VINF_SUCCESS)
13875 {
13876 rcStrict = iemExecOneInner(pVCpu, false);
13877 if (pcbWritten)
13878 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13879 }
13880
13881#ifdef IN_RC
13882 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13883#endif
13884 return rcStrict;
13885}
13886
13887
13888VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13889 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13890{
13891 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13892 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13893
13894 VBOXSTRICTRC rcStrict;
13895 if ( cbOpcodeBytes
13896 && pCtx->rip == OpcodeBytesPC)
13897 {
13898 iemInitDecoder(pVCpu, true);
13899#ifdef IEM_WITH_CODE_TLB
13900 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13901 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13902 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13903 pVCpu->iem.s.offCurInstrStart = 0;
13904 pVCpu->iem.s.offInstrNextByte = 0;
13905#else
13906 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13907 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13908#endif
13909 rcStrict = VINF_SUCCESS;
13910 }
13911 else
13912 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13913 if (rcStrict == VINF_SUCCESS)
13914 rcStrict = iemExecOneInner(pVCpu, false);
13915
13916#ifdef IN_RC
13917 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13918#endif
13919 return rcStrict;
13920}
13921
13922
13923/**
13924 * For debugging DISGetParamSize; may come in handy.
13925 *
13926 * @returns Strict VBox status code.
13927 * @param pVCpu The cross context virtual CPU structure of the
13928 * calling EMT.
13929 * @param pCtxCore The context core structure.
13930 * @param OpcodeBytesPC The PC of the opcode bytes.
13931 * @param pvOpcodeBytes Prefetched opcode bytes.
13932 * @param cbOpcodeBytes Number of prefetched bytes.
13933 * @param pcbWritten Where to return the number of bytes written.
13934 * Optional.
13935 */
13936VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13937 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13938 uint32_t *pcbWritten)
13939{
13940 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13941 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13942
13943 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13944 VBOXSTRICTRC rcStrict;
13945 if ( cbOpcodeBytes
13946 && pCtx->rip == OpcodeBytesPC)
13947 {
13948 iemInitDecoder(pVCpu, true);
13949#ifdef IEM_WITH_CODE_TLB
13950 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13951 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13952 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13953 pVCpu->iem.s.offCurInstrStart = 0;
13954 pVCpu->iem.s.offInstrNextByte = 0;
13955#else
13956 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13957 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13958#endif
13959 rcStrict = VINF_SUCCESS;
13960 }
13961 else
13962 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13963 if (rcStrict == VINF_SUCCESS)
13964 {
13965 rcStrict = iemExecOneInner(pVCpu, false);
13966 if (pcbWritten)
13967 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13968 }
13969
13970#ifdef IN_RC
13971 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13972#endif
13973 return rcStrict;
13974}
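
/* Editor's note (not part of the original source): hypothetical usage of the
 * prefetched-bytes variant above.  The caller already fetched the opcode bytes
 * from the guest at its current RIP and wants IEM to consume those bytes while
 * bypassing access handlers; pCtxCore must be the context core of pVCpu. */
#if 0 /* illustration only */
static VBOXSTRICTRC sketchReplayInstr(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t GCPtrRip,
                                      const uint8_t *pabInstr, size_t cbInstr)
{
    uint32_t cbWritten = 0;
    VBOXSTRICTRC rcStrict = IEMExecOneBypassWithPrefetchedByPCWritten(pVCpu, pCtxCore, GCPtrRip,
                                                                      pabInstr, cbInstr, &cbWritten);
    /* cbWritten now holds how many guest bytes the instruction stored. */
    RT_NOREF_PV(cbWritten);
    return rcStrict;
}
#endif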
13975
13976
13977VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
13978{
13979 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
13980
13981#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13982 /*
13983 * See if there is an interrupt pending in TRPM, inject it if we can.
13984 */
13985 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13986# ifdef IEM_VERIFICATION_MODE_FULL
13987 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13988# endif
13989 if ( pCtx->eflags.Bits.u1IF
13990 && TRPMHasTrap(pVCpu)
13991 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13992 {
13993 uint8_t u8TrapNo;
13994 TRPMEVENT enmType;
13995 RTGCUINT uErrCode;
13996 RTGCPTR uCr2;
13997 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13998 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13999 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14000 TRPMResetTrap(pVCpu);
14001 }
14002
14003 /*
14004 * Log the state.
14005 */
14006# ifdef LOG_ENABLED
14007 iemLogCurInstr(pVCpu, pCtx, true);
14008# endif
14009
14010 /*
14011 * Do the decoding and emulation.
14012 */
14013 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14014 if (rcStrict == VINF_SUCCESS)
14015 rcStrict = iemExecOneInner(pVCpu, true);
14016
14017 /*
14018 * Assert some sanity.
14019 */
14020 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14021
14022 /*
14023 * Log and return.
14024 */
14025 if (rcStrict != VINF_SUCCESS)
14026 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14027 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14028 if (pcInstructions)
14029 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14030 return rcStrict;
14031
14032#else /* Not verification mode */
14033
14034 /*
14035 * See if there is an interrupt pending in TRPM, inject it if we can.
14036 */
14037 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14038# ifdef IEM_VERIFICATION_MODE_FULL
14039 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14040# endif
14041 if ( pCtx->eflags.Bits.u1IF
14042 && TRPMHasTrap(pVCpu)
14043 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14044 {
14045 uint8_t u8TrapNo;
14046 TRPMEVENT enmType;
14047 RTGCUINT uErrCode;
14048 RTGCPTR uCr2;
14049 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14050 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14051 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14052 TRPMResetTrap(pVCpu);
14053 }
14054
14055 /*
14056 * Initial decoder init w/ prefetch, then setup setjmp.
14057 */
14058 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14059 if (rcStrict == VINF_SUCCESS)
14060 {
14061# ifdef IEM_WITH_SETJMP
14062 jmp_buf JmpBuf;
14063 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14064 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14065 pVCpu->iem.s.cActiveMappings = 0;
14066 if ((rcStrict = setjmp(JmpBuf)) == 0)
14067# endif
14068 {
14069 /*
14070 * The run loop. We limit ourselves to 4096 instructions right now.
14071 */
14072 PVM pVM = pVCpu->CTX_SUFF(pVM);
14073 uint32_t cInstr = 4096;
14074 for (;;)
14075 {
14076 /*
14077 * Log the state.
14078 */
14079# ifdef LOG_ENABLED
14080 iemLogCurInstr(pVCpu, pCtx, true);
14081# endif
14082
14083 /*
14084 * Do the decoding and emulation.
14085 */
14086 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14087 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14088 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14089 {
14090 Assert(pVCpu->iem.s.cActiveMappings == 0);
14091 pVCpu->iem.s.cInstructions++;
14092 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14093 {
14094 uint32_t fCpu = pVCpu->fLocalForcedActions
14095 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14096 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14097 | VMCPU_FF_TLB_FLUSH
14098# ifdef VBOX_WITH_RAW_MODE
14099 | VMCPU_FF_TRPM_SYNC_IDT
14100 | VMCPU_FF_SELM_SYNC_TSS
14101 | VMCPU_FF_SELM_SYNC_GDT
14102 | VMCPU_FF_SELM_SYNC_LDT
14103# endif
14104 | VMCPU_FF_INHIBIT_INTERRUPTS
14105 | VMCPU_FF_BLOCK_NMIS ));
14106
14107 if (RT_LIKELY( ( !fCpu
14108 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14109 && !pCtx->rflags.Bits.u1IF) )
14110 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14111 {
14112 if (cInstr-- > 0)
14113 {
14114 Assert(pVCpu->iem.s.cActiveMappings == 0);
14115 iemReInitDecoder(pVCpu);
14116 continue;
14117 }
14118 }
14119 }
14120 Assert(pVCpu->iem.s.cActiveMappings == 0);
14121 }
14122 else if (pVCpu->iem.s.cActiveMappings > 0)
14123 iemMemRollback(pVCpu);
14124 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14125 break;
14126 }
14127 }
14128# ifdef IEM_WITH_SETJMP
14129 else
14130 {
14131 if (pVCpu->iem.s.cActiveMappings > 0)
14132 iemMemRollback(pVCpu);
14133 pVCpu->iem.s.cLongJumps++;
14134 }
14135 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14136# endif
14137
14138 /*
14139 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14140 */
14141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14143# if defined(IEM_VERIFICATION_MODE_FULL)
14144 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14145 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14146 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14147 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14148# endif
14149 }
14150
14151 /*
14152 * Maybe re-enter raw-mode and log.
14153 */
14154# ifdef IN_RC
14155 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14156# endif
14157 if (rcStrict != VINF_SUCCESS)
14158 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14159 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14160 if (pcInstructions)
14161 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14162 return rcStrict;
14163#endif /* Not verification mode */
14164}
14165
14166
14167
14168/**
14169 * Injects a trap, fault, abort, software interrupt or external interrupt.
14170 *
14171 * The parameter list matches TRPMQueryTrapAll pretty closely.
14172 *
14173 * @returns Strict VBox status code.
14174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14175 * @param u8TrapNo The trap number.
14176 * @param enmType What type is it (trap/fault/abort), software
14177 * interrupt or hardware interrupt.
14178 * @param uErrCode The error code if applicable.
14179 * @param uCr2 The CR2 value if applicable.
14180 * @param cbInstr The instruction length (only relevant for
14181 * software interrupts).
14182 */
14183VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14184 uint8_t cbInstr)
14185{
14186 iemInitDecoder(pVCpu, false);
14187#ifdef DBGFTRACE_ENABLED
14188 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14189 u8TrapNo, enmType, uErrCode, uCr2);
14190#endif
14191
14192 uint32_t fFlags;
14193 switch (enmType)
14194 {
14195 case TRPM_HARDWARE_INT:
14196 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14197 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14198 uErrCode = uCr2 = 0;
14199 break;
14200
14201 case TRPM_SOFTWARE_INT:
14202 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14203 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14204 uErrCode = uCr2 = 0;
14205 break;
14206
14207 case TRPM_TRAP:
14208 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14209 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14210 if (u8TrapNo == X86_XCPT_PF)
14211 fFlags |= IEM_XCPT_FLAGS_CR2;
14212 switch (u8TrapNo)
14213 {
14214 case X86_XCPT_DF:
14215 case X86_XCPT_TS:
14216 case X86_XCPT_NP:
14217 case X86_XCPT_SS:
14218 case X86_XCPT_PF:
14219 case X86_XCPT_AC:
14220 fFlags |= IEM_XCPT_FLAGS_ERR;
14221 break;
14222
14223 case X86_XCPT_NMI:
14224 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14225 break;
14226 }
14227 break;
14228
14229 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14230 }
14231
14232 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14233}
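
/* Editor's note (not part of the original source): hypothetical example of
 * pushing an external interrupt through the API above; essentially what
 * IEMInjectTrpmEvent below does for the TRPM_HARDWARE_INT case. */
#if 0 /* illustration only */
static VBOXSTRICTRC sketchInjectExtInt(PVMCPU pVCpu, uint8_t u8Vector)
{
    /* The error code, CR2 and instruction length are ignored for external interrupts. */
    return IEMInjectTrap(pVCpu, u8Vector, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif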
14234
14235
14236/**
14237 * Injects the active TRPM event.
14238 *
14239 * @returns Strict VBox status code.
14240 * @param pVCpu The cross context virtual CPU structure.
14241 */
14242VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14243{
14244#ifndef IEM_IMPLEMENTS_TASKSWITCH
14245 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14246#else
14247 uint8_t u8TrapNo;
14248 TRPMEVENT enmType;
14249 RTGCUINT uErrCode;
14250 RTGCUINTPTR uCr2;
14251 uint8_t cbInstr;
14252 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14253 if (RT_FAILURE(rc))
14254 return rc;
14255
14256 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14257
14258 /** @todo Are there any other codes that imply the event was successfully
14259 * delivered to the guest? See @bugref{6607}. */
14260 if ( rcStrict == VINF_SUCCESS
14261 || rcStrict == VINF_IEM_RAISED_XCPT)
14262 {
14263 TRPMResetTrap(pVCpu);
14264 }
14265 return rcStrict;
14266#endif
14267}
14268
14269
14270VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14271{
14272 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14273 return VERR_NOT_IMPLEMENTED;
14274}
14275
14276
14277VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14278{
14279 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14280 return VERR_NOT_IMPLEMENTED;
14281}
14282
14283
14284#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14285/**
14286 * Executes a IRET instruction with default operand size.
14287 *
14288 * This is for PATM.
14289 *
14290 * @returns VBox status code.
14291 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14292 * @param pCtxCore The register frame.
14293 */
14294VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14295{
14296 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14297
14298 iemCtxCoreToCtx(pCtx, pCtxCore);
14299 iemInitDecoder(pVCpu);
14300 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14301 if (rcStrict == VINF_SUCCESS)
14302 iemCtxToCtxCore(pCtxCore, pCtx);
14303 else
14304 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14305 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14306 return rcStrict;
14307}
14308#endif
14309
14310
14311/**
14312 * Macro used by the IEMExec* method to check the given instruction length.
14313 *
14314 * Will return on failure!
14315 *
14316 * @param a_cbInstr The given instruction length.
14317 * @param a_cbMin The minimum length.
14318 */
14319#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14320 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14321 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
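
/* Editor's note (not part of the original source): the single unsigned
 * comparison above is the usual trick for checking
 * a_cbMin <= a_cbInstr <= 15 (15 being the maximum x86 instruction length)
 * in one go - if a_cbInstr is below a_cbMin, the subtraction wraps around to
 * a huge value and the test fails.  Worked through for a hypothetical 2-byte
 * minimum:
 *      cbInstr = 1   ->   1 - 2 = 0xffffffff  >  13  ->  rejected
 *      cbInstr = 2   ->   2 - 2 = 0          <= 13  ->  accepted
 *      cbInstr = 15  ->  15 - 2 = 13         <= 13  ->  accepted
 *      cbInstr = 16  ->  16 - 2 = 14          >  13  ->  rejected
 */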
14322
14323
14324/**
14325 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14326 *
14327 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14328 *
14329 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14331 * @param rcStrict The status code to fiddle.
14332 */
14333DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14334{
14335 iemUninitExec(pVCpu);
14336#ifdef IN_RC
14337 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14338 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14339#else
14340 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14341#endif
14342}
14343
14344
14345/**
14346 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14347 *
14348 * This API ASSUMES that the caller has already verified that the guest code is
14349 * allowed to access the I/O port. (The I/O port is in the DX register in the
14350 * guest state.)
14351 *
14352 * @returns Strict VBox status code.
14353 * @param pVCpu The cross context virtual CPU structure.
14354 * @param cbValue The size of the I/O port access (1, 2, or 4).
14355 * @param enmAddrMode The addressing mode.
14356 * @param fRepPrefix Indicates whether a repeat prefix is used
14357 * (doesn't matter which for this instruction).
14358 * @param cbInstr The instruction length in bytes.
14359 * @param iEffSeg The effective segment register index (X86_SREG_XXX).
14360 * @param fIoChecked Whether the access to the I/O port has been
14361 * checked or not. It's typically checked in the
14362 * HM scenario.
14363 */
14364VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14365 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14366{
14367 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14368 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14369
14370 /*
14371 * State init.
14372 */
14373 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14374
14375 /*
14376 * Switch orgy for getting to the right handler.
14377 */
14378 VBOXSTRICTRC rcStrict;
14379 if (fRepPrefix)
14380 {
14381 switch (enmAddrMode)
14382 {
14383 case IEMMODE_16BIT:
14384 switch (cbValue)
14385 {
14386 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14387 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14388 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14389 default:
14390 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14391 }
14392 break;
14393
14394 case IEMMODE_32BIT:
14395 switch (cbValue)
14396 {
14397 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14398 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14399 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14400 default:
14401 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14402 }
14403 break;
14404
14405 case IEMMODE_64BIT:
14406 switch (cbValue)
14407 {
14408 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14409 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14410 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14411 default:
14412 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14413 }
14414 break;
14415
14416 default:
14417 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14418 }
14419 }
14420 else
14421 {
14422 switch (enmAddrMode)
14423 {
14424 case IEMMODE_16BIT:
14425 switch (cbValue)
14426 {
14427 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14428 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14429 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14430 default:
14431 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14432 }
14433 break;
14434
14435 case IEMMODE_32BIT:
14436 switch (cbValue)
14437 {
14438 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14439 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14440 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14441 default:
14442 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14443 }
14444 break;
14445
14446 case IEMMODE_64BIT:
14447 switch (cbValue)
14448 {
14449 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14450 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14451 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14452 default:
14453 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14454 }
14455 break;
14456
14457 default:
14458 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14459 }
14460 }
14461
14462 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14463}
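
/* Editor's note (not part of the original source): a hypothetical call as an
 * HM-style exit handler might issue for a "rep outsb" in a 32-bit guest, with
 * the I/O port permission already verified and DS as the source segment.  The
 * read counterpart, IEMExecStringIoRead, is used the same way minus the
 * segment argument. */
#if 0 /* illustration only */
static VBOXSTRICTRC sketchHandleRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /*cbValue - byte sized port access*/,
                                IEMMODE_32BIT /*enmAddrMode*/,
                                true /*fRepPrefix*/,
                                cbInstr,
                                X86_SREG_DS /*iEffSeg*/,
                                true /*fIoChecked*/);
}
#endif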
14464
14465
14466/**
14467 * Interface for HM and EM for executing string I/O IN (read) instructions.
14468 *
14469 * This API ASSUMES that the caller has already verified that the guest code is
14470 * allowed to access the I/O port. (The I/O port is in the DX register in the
14471 * guest state.)
14472 *
14473 * @returns Strict VBox status code.
14474 * @param pVCpu The cross context virtual CPU structure.
14475 * @param cbValue The size of the I/O port access (1, 2, or 4).
14476 * @param enmAddrMode The addressing mode.
14477 * @param fRepPrefix Indicates whether a repeat prefix is used
14478 * (doesn't matter which for this instruction).
14479 * @param cbInstr The instruction length in bytes.
14480 * @param fIoChecked Whether the access to the I/O port has been
14481 * checked or not. It's typically checked in the
14482 * HM scenario.
14483 */
14484VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14485 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14486{
14487 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14488
14489 /*
14490 * State init.
14491 */
14492 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14493
14494 /*
14495 * Switch orgy for getting to the right handler.
14496 */
14497 VBOXSTRICTRC rcStrict;
14498 if (fRepPrefix)
14499 {
14500 switch (enmAddrMode)
14501 {
14502 case IEMMODE_16BIT:
14503 switch (cbValue)
14504 {
14505 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14506 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14507 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14508 default:
14509 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14510 }
14511 break;
14512
14513 case IEMMODE_32BIT:
14514 switch (cbValue)
14515 {
14516 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14517 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14518 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14519 default:
14520 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14521 }
14522 break;
14523
14524 case IEMMODE_64BIT:
14525 switch (cbValue)
14526 {
14527 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14528 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14529 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14530 default:
14531 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14532 }
14533 break;
14534
14535 default:
14536 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14537 }
14538 }
14539 else
14540 {
14541 switch (enmAddrMode)
14542 {
14543 case IEMMODE_16BIT:
14544 switch (cbValue)
14545 {
14546 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14547 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14548 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14549 default:
14550 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14551 }
14552 break;
14553
14554 case IEMMODE_32BIT:
14555 switch (cbValue)
14556 {
14557 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14558 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14559 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14560 default:
14561 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14562 }
14563 break;
14564
14565 case IEMMODE_64BIT:
14566 switch (cbValue)
14567 {
14568 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14569 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14570 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14571 default:
14572 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14573 }
14574 break;
14575
14576 default:
14577 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14578 }
14579 }
14580
14581 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14582}
14583
14584
14585/**
14586 * Interface for raw-mode to execute an OUT instruction.
14587 *
14588 * @returns Strict VBox status code.
14589 * @param pVCpu The cross context virtual CPU structure.
14590 * @param cbInstr The instruction length in bytes.
14591 * @param u16Port The port to write to.
14592 * @param cbReg The register size.
14593 *
14594 * @remarks In ring-0 not all of the state needs to be synced in.
14595 */
14596VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14597{
14598 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14599 Assert(cbReg <= 4 && cbReg != 3);
14600
14601 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14602 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14603 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14604}
14605
14606
14607/**
14608 * Interface for raw-mode to execute an IN instruction.
14609 *
14610 * @returns Strict VBox status code.
14611 * @param pVCpu The cross context virtual CPU structure.
14612 * @param cbInstr The instruction length in bytes.
14613 * @param u16Port The port to read.
14614 * @param cbReg The register size.
14615 */
14616VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14617{
14618 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14619 Assert(cbReg <= 4 && cbReg != 3);
14620
14621 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14622 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14623 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14624}
14625
14626
14627/**
14628 * Interface for HM and EM to write to a CRx register.
14629 *
14630 * @returns Strict VBox status code.
14631 * @param pVCpu The cross context virtual CPU structure.
14632 * @param cbInstr The instruction length in bytes.
14633 * @param iCrReg The control register number (destination).
14634 * @param iGReg The general purpose register number (source).
14635 *
14636 * @remarks In ring-0 not all of the state needs to be synced in.
14637 */
14638VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14639{
14640 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14641 Assert(iCrReg < 16);
14642 Assert(iGReg < 16);
14643
14644 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14645 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14646 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14647}
14648
14649
14650/**
14651 * Interface for HM and EM to read from a CRx register.
14652 *
14653 * @returns Strict VBox status code.
14654 * @param pVCpu The cross context virtual CPU structure.
14655 * @param cbInstr The instruction length in bytes.
14656 * @param iGReg The general purpose register number (destination).
14657 * @param iCrReg The control register number (source).
14658 *
14659 * @remarks In ring-0 not all of the state needs to be synced in.
14660 */
14661VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14662{
14663 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14664 Assert(iCrReg < 16);
14665 Assert(iGReg < 16);
14666
14667 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14668 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14669 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14670}
14671
14672
14673/**
14674 * Interface for HM and EM to clear the CR0[TS] bit.
14675 *
14676 * @returns Strict VBox status code.
14677 * @param pVCpu The cross context virtual CPU structure.
14678 * @param cbInstr The instruction length in bytes.
14679 *
14680 * @remarks In ring-0 not all of the state needs to be synced in.
14681 */
14682VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14683{
14684 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14685
14686 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14687 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14688 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14689}
14690
14691
14692/**
14693 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14694 *
14695 * @returns Strict VBox status code.
14696 * @param pVCpu The cross context virtual CPU structure.
14697 * @param cbInstr The instruction length in bytes.
14698 * @param uValue The value to load into CR0.
14699 *
14700 * @remarks In ring-0 not all of the state needs to be synced in.
14701 */
14702VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14703{
14704 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14705
14706 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14707 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14708 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14709}
14710
14711
14712/**
14713 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14714 *
14715 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14716 *
14717 * @returns Strict VBox status code.
14718 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14719 * @param cbInstr The instruction length in bytes.
14720 * @remarks In ring-0 not all of the state needs to be synced in.
14721 * @thread EMT(pVCpu)
14722 */
14723VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14724{
14725 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14726
14727 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14728 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14729 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14730}
14731
14732#ifdef IN_RING3
14733
14734/**
14735 * Handles the unlikely and probably fatal merge cases.
14736 *
14737 * @returns Merged status code.
14738 * @param rcStrict Current EM status code.
14739 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14740 * with @a rcStrict.
14741 * @param iMemMap The memory mapping index. For error reporting only.
14742 * @param pVCpu The cross context virtual CPU structure of the calling
14743 * thread, for error reporting only.
14744 */
14745DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14746 unsigned iMemMap, PVMCPU pVCpu)
14747{
14748 if (RT_FAILURE_NP(rcStrict))
14749 return rcStrict;
14750
14751 if (RT_FAILURE_NP(rcStrictCommit))
14752 return rcStrictCommit;
14753
14754 if (rcStrict == rcStrictCommit)
14755 return rcStrictCommit;
14756
14757 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14758 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14759 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14760 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14761 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14762 return VERR_IOM_FF_STATUS_IPE;
14763}
14764
14765
14766/**
14767 * Helper for IOMR3ProcessForceFlag.
14768 *
14769 * @returns Merged status code.
14770 * @param rcStrict Current EM status code.
14771 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14772 * with @a rcStrict.
14773 * @param iMemMap The memory mapping index. For error reporting only.
14774 * @param pVCpu The cross context virtual CPU structure of the calling
14775 * thread, for error reporting only.
14776 */
14777DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14778{
14779 /* Simple. */
14780 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14781 return rcStrictCommit;
14782
14783 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14784 return rcStrict;
14785
14786 /* EM scheduling status codes. */
14787 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14788 && rcStrict <= VINF_EM_LAST))
14789 {
14790 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14791 && rcStrictCommit <= VINF_EM_LAST))
14792 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14793 }
14794
14795 /* Unlikely */
14796 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14797}
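
/* Editor's note (not part of the original source): the merge above boils down
 * to these cases (X and Y stand for arbitrary pending statuses):
 *      rcStrict = VINF_SUCCESS or VINF_EM_RAW_TO_R3, commit = X  ->  X
 *      rcStrict = Y, commit = VINF_SUCCESS                       ->  Y
 *      both within VINF_EM_FIRST..VINF_EM_LAST                   ->  the numerically smaller
 *                                                                    (more important) of the two
 *      failures and everything else                              ->  iemR3MergeStatusSlow
 */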
14798
14799
14800/**
14801 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14802 *
14803 * @returns Merge between @a rcStrict and what the commit operation returned.
14804 * @param pVM The cross context VM structure.
14805 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14806 * @param rcStrict The status code returned by ring-0 or raw-mode.
14807 */
14808VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14809{
14810 /*
14811 * Reset the pending commit.
14812 */
14813 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14814 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14815 ("%#x %#x %#x\n",
14816 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14817 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14818
14819 /*
14820 * Commit the pending bounce buffers (usually just one).
14821 */
14822 unsigned cBufs = 0;
14823 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14824 while (iMemMap-- > 0)
14825 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14826 {
14827 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14828 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14829 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14830
14831 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14832 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14833 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14834
14835 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14836 {
14837 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14838 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14839 pbBuf,
14840 cbFirst,
14841 PGMACCESSORIGIN_IEM);
14842 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14843 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14844 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14845 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14846 }
14847
14848 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14849 {
14850 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14851 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14852 pbBuf + cbFirst,
14853 cbSecond,
14854 PGMACCESSORIGIN_IEM);
14855 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14856 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14857 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14858 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14859 }
14860 cBufs++;
14861 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14862 }
14863
14864 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14865 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14866 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14867 pVCpu->iem.s.cActiveMappings = 0;
14868 return rcStrict;
14869}
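
/* Editor's note (not part of the original source): a hypothetical sketch of
 * the force-flag handling mentioned above - the real call site lives in EM,
 * this only illustrates the contract. */
# if 0 /* illustration only */
static VBOXSTRICTRC sketchHandleIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))   /* set when IEM has bounce-buffer writes pending a ring-3 commit */
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
# endif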
14870
14871#endif /* IN_RING3 */
14872