VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 62411

Last change on this file since 62411 was 62302, checked in by vboxsync, 8 years ago

IEM,PGM: Got code TLB working in ring-3, execution is 3-4 times faster when active (still disabled of course).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 573.0 KB
1/* $Id: IEMAll.cpp 62302 2016-07-18 13:58:10Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85
86/*********************************************************************************************************************************
87* Header Files *
88*********************************************************************************************************************************/
89#define LOG_GROUP LOG_GROUP_IEM
90#define VMCPU_INCL_CPUM_GST_CTX
91#include <VBox/vmm/iem.h>
92#include <VBox/vmm/cpum.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/tm.h>
99#include <VBox/vmm/dbgf.h>
100#include <VBox/vmm/dbgftrace.h>
101#ifdef VBOX_WITH_RAW_MODE_NOT_R0
102# include <VBox/vmm/patm.h>
103# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
104# include <VBox/vmm/csam.h>
105# endif
106#endif
107#include "IEMInternal.h"
108#ifdef IEM_VERIFICATION_MODE_FULL
109# include <VBox/vmm/rem.h>
110# include <VBox/vmm/mm.h>
111#endif
112#include <VBox/vmm/vm.h>
113#include <VBox/log.h>
114#include <VBox/err.h>
115#include <VBox/param.h>
116#include <VBox/dis.h>
117#include <VBox/disopcode.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209
210/*********************************************************************************************************************************
211* Defined Constants And Macros *
212*********************************************************************************************************************************/
213/** @def IEM_WITH_SETJMP
214 * Enables alternative status code handling using setjmps.
215 *
216 * This adds a bit of expense via the setjmp() call since it saves all the
217 * non-volatile registers. However, it eliminates return code checks and allows
218 * for more optimal return value passing (return regs instead of stack buffer).
219 */
220#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
221# define IEM_WITH_SETJMP
222#endif
223
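/* Illustrative sketch only (not from the original source): with IEM_WITH_SETJMP the
 * outer executor arms a jump buffer and the deep fetch/decode workers report failures
 * by longjmp'ing with a strict status code instead of returning it up the call chain.
 * Roughly:
 *
 *      jmp_buf JmpBuf;
 *      CTX_SUFF(pVCpu->iem.s.pJmpBuf) = &JmpBuf;      // arm the buffer the workers use
 *      int rc = setjmp(JmpBuf);
 *      if (rc == 0)
 *          rc = ...decode and execute...;             // workers may longjmp(JmpBuf, rcStrict)
 */
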
224/** Temporary hack to disable the double execution. Will be removed in favor
225 * of a dedicated execution mode in EM. */
226//#define IEM_VERIFICATION_MODE_NO_REM
227
228/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
229 * due to GCC lacking knowledge about the value range of a switch. */
230#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
231
232/**
233 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
234 * occasion.
235 */
236#ifdef LOG_ENABLED
237# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
238 do { \
239 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
240 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
241 } while (0)
242#else
243# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
244 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
245#endif
246
247/**
248 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
249 * occasion using the supplied logger statement.
250 *
251 * @param a_LoggerArgs What to log on failure.
252 */
253#ifdef LOG_ENABLED
254# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
255 do { \
256 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
257 /*LogFunc(a_LoggerArgs);*/ \
258 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
259 } while (0)
260#else
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
262 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
263#endif
264
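/* Illustrative use of the two macros above (hypothetical call site, not lifted from
 * the original source):
 *
 *      if (fWeirdAddressingCase)   // some aspect we haven't implemented yet
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("odd addressing mode %#x\n", bRm));
 */
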
265/**
266 * Call an opcode decoder function.
267 *
268 * We're using macros for this so that adding and removing parameters can be
269 * done as we please. See FNIEMOP_DEF.
270 */
271#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
272
273/**
274 * Call a common opcode decoder function taking one extra argument.
275 *
276 * We're using macros for this so that adding and removing parameters can be
277 * done as we please. See FNIEMOP_DEF_1.
278 */
279#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
280
281/**
282 * Call a common opcode decoder function taking two extra arguments.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF_2.
286 */
287#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
288
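/* Illustrative sketch only: how the FNIEMOP_DEF / FNIEMOP_CALL pairs above are meant
 * to be combined.  The handler below is hypothetical and not part of the real
 * one-byte map (a real handler would decode its operands and advance RIP).
 *
 *      FNIEMOP_DEF(iemOp_ExampleNop)
 *      {
 *          // nothing to decode, nothing to execute
 *          return VINF_SUCCESS;
 *      }
 *      ...
 *      rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);  // dispatch on the fetched opcode byte
 */
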
289/**
290 * Check if we're currently executing in real or virtual 8086 mode.
291 *
292 * @returns @c true if it is, @c false if not.
293 * @param a_pVCpu The IEM state of the current CPU.
294 */
295#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
296
297/**
298 * Check if we're currently executing in virtual 8086 mode.
299 *
300 * @returns @c true if it is, @c false if not.
301 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
302 */
303#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
304
305/**
306 * Check if we're currently executing in long mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
310 */
311#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in real mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
323 * @returns PCCPUMFEATURES
324 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
325 */
326#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
327
328/**
329 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
330 * @returns PCCPUMFEATURES
331 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
332 */
333#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
334
335/**
336 * Evaluates to true if we're presenting an Intel CPU to the guest.
337 */
338#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
339
340/**
341 * Evaluates to true if we're presenting an AMD CPU to the guest.
342 */
343#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
344
345/**
346 * Check if the address is canonical.
347 */
348#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
349
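/* For reference (descriptive note added here, not from the original source): an address
 * is canonical when bits 63:47 are all identical, i.e. a sign extension of bit 47.
 * E.g. 0x00007fffffffffff and 0xffff800000000000 are canonical, while
 * 0x0000800000000000 is not. */
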
350/** @def IEM_USE_UNALIGNED_DATA_ACCESS
351 * Use unaligned accesses instead of elaborate byte assembly. */
352#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
353# define IEM_USE_UNALIGNED_DATA_ACCESS
354#endif
355
356
357/*********************************************************************************************************************************
358* Global Variables *
359*********************************************************************************************************************************/
360extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
361
362
363/** Function table for the ADD instruction. */
364IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
365{
366 iemAImpl_add_u8, iemAImpl_add_u8_locked,
367 iemAImpl_add_u16, iemAImpl_add_u16_locked,
368 iemAImpl_add_u32, iemAImpl_add_u32_locked,
369 iemAImpl_add_u64, iemAImpl_add_u64_locked
370};
371
372/** Function table for the ADC instruction. */
373IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
374{
375 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
376 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
377 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
378 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
379};
380
381/** Function table for the SUB instruction. */
382IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
383{
384 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
385 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
386 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
387 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
388};
389
390/** Function table for the SBB instruction. */
391IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
392{
393 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
394 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
395 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
396 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
397};
398
399/** Function table for the OR instruction. */
400IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
401{
402 iemAImpl_or_u8, iemAImpl_or_u8_locked,
403 iemAImpl_or_u16, iemAImpl_or_u16_locked,
404 iemAImpl_or_u32, iemAImpl_or_u32_locked,
405 iemAImpl_or_u64, iemAImpl_or_u64_locked
406};
407
408/** Function table for the XOR instruction. */
409IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
410{
411 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
412 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
413 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
414 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
415};
416
417/** Function table for the AND instruction. */
418IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
419{
420 iemAImpl_and_u8, iemAImpl_and_u8_locked,
421 iemAImpl_and_u16, iemAImpl_and_u16_locked,
422 iemAImpl_and_u32, iemAImpl_and_u32_locked,
423 iemAImpl_and_u64, iemAImpl_and_u64_locked
424};
425
426/** Function table for the CMP instruction.
427 * @remarks Making operand order ASSUMPTIONS.
428 */
429IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
430{
431 iemAImpl_cmp_u8, NULL,
432 iemAImpl_cmp_u16, NULL,
433 iemAImpl_cmp_u32, NULL,
434 iemAImpl_cmp_u64, NULL
435};
436
437/** Function table for the TEST instruction.
438 * @remarks Making operand order ASSUMPTIONS.
439 */
440IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
441{
442 iemAImpl_test_u8, NULL,
443 iemAImpl_test_u16, NULL,
444 iemAImpl_test_u32, NULL,
445 iemAImpl_test_u64, NULL
446};
447
448/** Function table for the BT instruction. */
449IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
450{
451 NULL, NULL,
452 iemAImpl_bt_u16, NULL,
453 iemAImpl_bt_u32, NULL,
454 iemAImpl_bt_u64, NULL
455};
456
457/** Function table for the BTC instruction. */
458IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
459{
460 NULL, NULL,
461 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
462 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
463 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
464};
465
466/** Function table for the BTR instruction. */
467IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
468{
469 NULL, NULL,
470 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
471 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
472 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
473};
474
475/** Function table for the BTS instruction. */
476IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
477{
478 NULL, NULL,
479 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
480 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
481 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
482};
483
484/** Function table for the BSF instruction. */
485IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
486{
487 NULL, NULL,
488 iemAImpl_bsf_u16, NULL,
489 iemAImpl_bsf_u32, NULL,
490 iemAImpl_bsf_u64, NULL
491};
492
493/** Function table for the BSR instruction. */
494IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
495{
496 NULL, NULL,
497 iemAImpl_bsr_u16, NULL,
498 iemAImpl_bsr_u32, NULL,
499 iemAImpl_bsr_u64, NULL
500};
501
502/** Function table for the IMUL instruction. */
503IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
504{
505 NULL, NULL,
506 iemAImpl_imul_two_u16, NULL,
507 iemAImpl_imul_two_u32, NULL,
508 iemAImpl_imul_two_u64, NULL
509};
510
511/** Group 1 /r lookup table. */
512IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
513{
514 &g_iemAImpl_add,
515 &g_iemAImpl_or,
516 &g_iemAImpl_adc,
517 &g_iemAImpl_sbb,
518 &g_iemAImpl_and,
519 &g_iemAImpl_sub,
520 &g_iemAImpl_xor,
521 &g_iemAImpl_cmp
522};
523
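/* Illustrative sketch only (assumes the usual X86_MODRM_* helpers from iprt/x86.h):
 * the reg field of the ModR/M byte selects the group 1 handler, e.g. for 0x80..0x83:
 *
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *      // index 0 = ADD, 1 = OR, 2 = ADC, 3 = SBB, 4 = AND, 5 = SUB, 6 = XOR, 7 = CMP
 */
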
524/** Function table for the INC instruction. */
525IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
526{
527 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
528 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
529 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
530 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
531};
532
533/** Function table for the DEC instruction. */
534IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
535{
536 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
537 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
538 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
539 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
540};
541
542/** Function table for the NEG instruction. */
543IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
544{
545 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
546 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
547 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
548 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
549};
550
551/** Function table for the NOT instruction. */
552IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
553{
554 iemAImpl_not_u8, iemAImpl_not_u8_locked,
555 iemAImpl_not_u16, iemAImpl_not_u16_locked,
556 iemAImpl_not_u32, iemAImpl_not_u32_locked,
557 iemAImpl_not_u64, iemAImpl_not_u64_locked
558};
559
560
561/** Function table for the ROL instruction. */
562IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
563{
564 iemAImpl_rol_u8,
565 iemAImpl_rol_u16,
566 iemAImpl_rol_u32,
567 iemAImpl_rol_u64
568};
569
570/** Function table for the ROR instruction. */
571IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
572{
573 iemAImpl_ror_u8,
574 iemAImpl_ror_u16,
575 iemAImpl_ror_u32,
576 iemAImpl_ror_u64
577};
578
579/** Function table for the RCL instruction. */
580IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
581{
582 iemAImpl_rcl_u8,
583 iemAImpl_rcl_u16,
584 iemAImpl_rcl_u32,
585 iemAImpl_rcl_u64
586};
587
588/** Function table for the RCR instruction. */
589IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
590{
591 iemAImpl_rcr_u8,
592 iemAImpl_rcr_u16,
593 iemAImpl_rcr_u32,
594 iemAImpl_rcr_u64
595};
596
597/** Function table for the SHL instruction. */
598IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
599{
600 iemAImpl_shl_u8,
601 iemAImpl_shl_u16,
602 iemAImpl_shl_u32,
603 iemAImpl_shl_u64
604};
605
606/** Function table for the SHR instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
608{
609 iemAImpl_shr_u8,
610 iemAImpl_shr_u16,
611 iemAImpl_shr_u32,
612 iemAImpl_shr_u64
613};
614
615/** Function table for the SAR instruction. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
617{
618 iemAImpl_sar_u8,
619 iemAImpl_sar_u16,
620 iemAImpl_sar_u32,
621 iemAImpl_sar_u64
622};
623
624
625/** Function table for the MUL instruction. */
626IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
627{
628 iemAImpl_mul_u8,
629 iemAImpl_mul_u16,
630 iemAImpl_mul_u32,
631 iemAImpl_mul_u64
632};
633
634/** Function table for the IMUL instruction working implicitly on rAX. */
635IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
636{
637 iemAImpl_imul_u8,
638 iemAImpl_imul_u16,
639 iemAImpl_imul_u32,
640 iemAImpl_imul_u64
641};
642
643/** Function table for the DIV instruction. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
645{
646 iemAImpl_div_u8,
647 iemAImpl_div_u16,
648 iemAImpl_div_u32,
649 iemAImpl_div_u64
650};
651
652/** Function table for the IDIV instruction. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
654{
655 iemAImpl_idiv_u8,
656 iemAImpl_idiv_u16,
657 iemAImpl_idiv_u32,
658 iemAImpl_idiv_u64
659};
660
661/** Function table for the SHLD instruction */
662IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
663{
664 iemAImpl_shld_u16,
665 iemAImpl_shld_u32,
666 iemAImpl_shld_u64,
667};
668
669/** Function table for the SHRD instruction */
670IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
671{
672 iemAImpl_shrd_u16,
673 iemAImpl_shrd_u32,
674 iemAImpl_shrd_u64,
675};
676
677
678/** Function table for the PUNPCKLBW instruction */
679IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
680/** Function table for the PUNPCKLWD instruction */
681IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
682/** Function table for the PUNPCKLDQ instruction */
683IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
684/** Function table for the PUNPCKLQDQ instruction */
685IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
686
687/** Function table for the PUNPCKHBW instruction */
688IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
689/** Function table for the PUNPCKHWD instruction */
690IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
691/** Function table for the PUNPCKHDQ instruction */
692IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
693/** Function table for the PUNPCKHQDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
695
696/** Function table for the PXOR instruction */
697IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
698/** Function table for the PCMPEQB instruction */
699IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
700/** Function table for the PCMPEQW instruction */
701IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
702/** Function table for the PCMPEQD instruction */
703IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
704
705
706#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
707/** What IEM just wrote. */
708uint8_t g_abIemWrote[256];
709/** How much IEM just wrote. */
710size_t g_cbIemWrote;
711#endif
712
713
714/*********************************************************************************************************************************
715* Internal Functions *
716*********************************************************************************************************************************/
717IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
718IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
719IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
720IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
721/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
722IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
723IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
724IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
725IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
726IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
727IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
728IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
729IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
730IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
732IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
733IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
734#ifdef IEM_WITH_SETJMP
735DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
736DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
737DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
738DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
739DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
740#endif
741
742IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
743IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
744IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
745IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
746IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
747IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
748IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
749IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
750IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
751IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
752IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
753IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
754IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
755IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
756IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
757IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
758
759#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
760IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
761#endif
762IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
763IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
764
765
766
767/**
768 * Sets the pass up status.
769 *
770 * @returns VINF_SUCCESS.
771 * @param pVCpu The cross context virtual CPU structure of the
772 * calling thread.
773 * @param rcPassUp The pass up status. Must be informational.
774 * VINF_SUCCESS is not allowed.
775 */
776IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
777{
778 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
779
780 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
781 if (rcOldPassUp == VINF_SUCCESS)
782 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
783 /* If both are EM scheduling codes, use EM priority rules. */
784 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
785 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
786 {
787 if (rcPassUp < rcOldPassUp)
788 {
789 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
790 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
791 }
792 else
793 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
794 }
795 /* Override EM scheduling with specific status code. */
796 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
797 {
798 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
799 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
800 }
801 /* Don't override specific status code, first come first served. */
802 else
803 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
804 return VINF_SUCCESS;
805}
806
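/* Illustrative use (sketch mirroring the pattern in the opcode prefetch code further
 * down): informational statuses from PGM reads are remembered via the pass-up
 * mechanism and execution continues as if the access fully succeeded:
 *
 *      if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 */
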
807
808/**
809 * Calculates the CPU mode.
810 *
811 * This is mainly for updating IEMCPU::enmCpuMode.
812 *
813 * @returns CPU mode.
814 * @param pCtx The register context for the CPU.
815 */
816DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
817{
818 if (CPUMIsGuestIn64BitCodeEx(pCtx))
819 return IEMMODE_64BIT;
820 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
821 return IEMMODE_32BIT;
822 return IEMMODE_16BIT;
823}
824
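/* Descriptive note (added, not from the original source): the decision above boils
 * down to 64-bit if the guest is executing 64-bit code (long mode with CS.L=1);
 * otherwise the CS.D default-operand-size bit picks 32-bit vs 16-bit. */
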
825
826/**
827 * Initializes the execution state.
828 *
829 * @param pVCpu The cross context virtual CPU structure of the
830 * calling thread.
831 * @param fBypassHandlers Whether to bypass access handlers.
832 *
833 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
834 * side-effects in strict builds.
835 */
836DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
837{
838 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
839
840 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
841
842#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
843 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
851#endif
852
853#ifdef VBOX_WITH_RAW_MODE_NOT_R0
854 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
855#endif
856 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
857 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
858#ifdef VBOX_STRICT
859 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
860 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
861 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
862 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
863 pVCpu->iem.s.fPrefixes = (IEMMODE)0xfeedbeef;
864 pVCpu->iem.s.uRexReg = 127;
865 pVCpu->iem.s.uRexB = 127;
866 pVCpu->iem.s.uRexIndex = 127;
867 pVCpu->iem.s.iEffSeg = 127;
868 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
869# ifdef IEM_WITH_CODE_TLB
870 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
871 pVCpu->iem.s.pbInstrBuf = NULL;
872 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
873 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
874 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
875 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
876# else
877 pVCpu->iem.s.offOpcode = 127;
878 pVCpu->iem.s.cbOpcode = 127;
879# endif
880#endif
881
882 pVCpu->iem.s.cActiveMappings = 0;
883 pVCpu->iem.s.iNextMapping = 0;
884 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
885 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
886#ifdef VBOX_WITH_RAW_MODE_NOT_R0
887 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
888 && pCtx->cs.u64Base == 0
889 && pCtx->cs.u32Limit == UINT32_MAX
890 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
891 if (!pVCpu->iem.s.fInPatchCode)
892 CPUMRawLeave(pVCpu, VINF_SUCCESS);
893#endif
894
895#ifdef IEM_VERIFICATION_MODE_FULL
896 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
897 pVCpu->iem.s.fNoRem = true;
898#endif
899}
900
901
902/**
903 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
904 *
905 * @param pVCpu The cross context virtual CPU structure of the
906 * calling thread.
907 */
908DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
909{
910 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
911#ifdef IEM_VERIFICATION_MODE_FULL
912 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
913#endif
914#ifdef VBOX_STRICT
915# ifdef IEM_WITH_CODE_TLB
916# else
917 pVCpu->iem.s.cbOpcode = 0;
918# endif
919#else
920 NOREF(pVCpu);
921#endif
922}
923
924
925/**
926 * Initializes the decoder state.
927 *
928 * iemReInitDecoder is mostly a copy of this function.
929 *
930 * @param pVCpu The cross context virtual CPU structure of the
931 * calling thread.
932 * @param fBypassHandlers Whether to bypass access handlers.
933 */
934DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
935{
936 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
937
938 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
939
940#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
941 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
942 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
944 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
946 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
948 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
949#endif
950
951#ifdef VBOX_WITH_RAW_MODE_NOT_R0
952 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
953#endif
954 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
955#ifdef IEM_VERIFICATION_MODE_FULL
956 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
957 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
958#endif
959 IEMMODE enmMode = iemCalcCpuMode(pCtx);
960 pVCpu->iem.s.enmCpuMode = enmMode;
961 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
962 pVCpu->iem.s.enmEffAddrMode = enmMode;
963 if (enmMode != IEMMODE_64BIT)
964 {
965 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
966 pVCpu->iem.s.enmEffOpSize = enmMode;
967 }
968 else
969 {
970 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
971 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
972 }
973 pVCpu->iem.s.fPrefixes = 0;
974 pVCpu->iem.s.uRexReg = 0;
975 pVCpu->iem.s.uRexB = 0;
976 pVCpu->iem.s.uRexIndex = 0;
977 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
978#ifdef IEM_WITH_CODE_TLB
979 pVCpu->iem.s.pbInstrBuf = NULL;
980 pVCpu->iem.s.offInstrNextByte = 0;
981 pVCpu->iem.s.offCurInstrStart = 0;
982# ifdef VBOX_STRICT
983 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
984 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
985 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
986# endif
987#else
988 pVCpu->iem.s.offOpcode = 0;
989 pVCpu->iem.s.cbOpcode = 0;
990#endif
991 pVCpu->iem.s.cActiveMappings = 0;
992 pVCpu->iem.s.iNextMapping = 0;
993 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
994 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
995#ifdef VBOX_WITH_RAW_MODE_NOT_R0
996 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
997 && pCtx->cs.u64Base == 0
998 && pCtx->cs.u32Limit == UINT32_MAX
999 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1000 if (!pVCpu->iem.s.fInPatchCode)
1001 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1002#endif
1003
1004#ifdef DBGFTRACE_ENABLED
1005 switch (enmMode)
1006 {
1007 case IEMMODE_64BIT:
1008 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1009 break;
1010 case IEMMODE_32BIT:
1011 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1012 break;
1013 case IEMMODE_16BIT:
1014 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1015 break;
1016 }
1017#endif
1018}
1019
1020
1021/**
1022 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1023 *
1024 * This is mostly a copy of iemInitDecoder.
1025 *
1026 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1027 */
1028DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1029{
1030 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1031
1032 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1033
1034#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1042 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1043#endif
1044
1045 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1046#ifdef IEM_VERIFICATION_MODE_FULL
1047 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1048 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1049#endif
1050 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1051 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1052 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1053 pVCpu->iem.s.enmEffAddrMode = enmMode;
1054 if (enmMode != IEMMODE_64BIT)
1055 {
1056 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1057 pVCpu->iem.s.enmEffOpSize = enmMode;
1058 }
1059 else
1060 {
1061 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1062 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1063 }
1064 pVCpu->iem.s.fPrefixes = 0;
1065 pVCpu->iem.s.uRexReg = 0;
1066 pVCpu->iem.s.uRexB = 0;
1067 pVCpu->iem.s.uRexIndex = 0;
1068 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1069#ifdef IEM_WITH_CODE_TLB
1070 if (pVCpu->iem.s.pbInstrBuf)
1071 {
1072 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1073 - pVCpu->iem.s.uInstrBufPc;
1074 if (off < pVCpu->iem.s.cbInstrBufTotal)
1075 {
1076 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1077 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1078 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1079 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1080 else
1081 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1082 }
1083 else
1084 {
1085 pVCpu->iem.s.pbInstrBuf = NULL;
1086 pVCpu->iem.s.offInstrNextByte = 0;
1087 pVCpu->iem.s.offCurInstrStart = 0;
1088 pVCpu->iem.s.cbInstrBuf = 0;
1089 pVCpu->iem.s.cbInstrBufTotal = 0;
1090 }
1091 }
1092 else
1093 {
1094 pVCpu->iem.s.offInstrNextByte = 0;
1095 pVCpu->iem.s.offCurInstrStart = 0;
1096 pVCpu->iem.s.cbInstrBuf = 0;
1097 pVCpu->iem.s.cbInstrBufTotal = 0;
1098 }
1099#else
1100 pVCpu->iem.s.cbOpcode = 0;
1101 pVCpu->iem.s.offOpcode = 0;
1102#endif
1103 Assert(pVCpu->iem.s.cActiveMappings == 0);
1104 pVCpu->iem.s.iNextMapping = 0;
1105 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1106 Assert(pVCpu->iem.s.fBypassHandlers == false);
1107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1108 if (!pVCpu->iem.s.fInPatchCode)
1109 { /* likely */ }
1110 else
1111 {
1112 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1113 && pCtx->cs.u64Base == 0
1114 && pCtx->cs.u32Limit == UINT32_MAX
1115 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1116 if (!pVCpu->iem.s.fInPatchCode)
1117 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1118 }
1119#endif
1120
1121#ifdef DBGFTRACE_ENABLED
1122 switch (enmMode)
1123 {
1124 case IEMMODE_64BIT:
1125 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1126 break;
1127 case IEMMODE_32BIT:
1128 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1129 break;
1130 case IEMMODE_16BIT:
1131 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1132 break;
1133 }
1134#endif
1135}
1136
1137
1138
1139/**
1140 * Prefetches the opcodes the first time, when starting execution.
1141 *
1142 * @returns Strict VBox status code.
1143 * @param pVCpu The cross context virtual CPU structure of the
1144 * calling thread.
1145 * @param fBypassHandlers Whether to bypass access handlers.
1146 */
1147IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1148{
1149#ifdef IEM_VERIFICATION_MODE_FULL
1150 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1151#endif
1152 iemInitDecoder(pVCpu, fBypassHandlers);
1153
1154#ifdef IEM_WITH_CODE_TLB
1155 /** @todo Do ITLB lookup here. */
1156
1157#else /* !IEM_WITH_CODE_TLB */
1158
1159 /*
1160 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1161 *
1162 * First translate CS:rIP to a physical address.
1163 */
1164 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1165 uint32_t cbToTryRead;
1166 RTGCPTR GCPtrPC;
1167 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1168 {
1169 cbToTryRead = PAGE_SIZE;
1170 GCPtrPC = pCtx->rip;
1171 if (!IEM_IS_CANONICAL(GCPtrPC))
1172 return iemRaiseGeneralProtectionFault0(pVCpu);
1173 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1174 }
1175 else
1176 {
1177 uint32_t GCPtrPC32 = pCtx->eip;
1178 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1179 if (GCPtrPC32 > pCtx->cs.u32Limit)
1180 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1181 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1182 if (!cbToTryRead) /* overflowed */
1183 {
1184 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1185 cbToTryRead = UINT32_MAX;
1186 }
1187 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1188 Assert(GCPtrPC <= UINT32_MAX);
1189 }
1190
1191# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1192 /* Allow interpretation of patch manager code blocks since they can for
1193 instance throw #PFs for perfectly good reasons. */
1194 if (pVCpu->iem.s.fInPatchCode)
1195 {
1196 size_t cbRead = 0;
1197 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1198 AssertRCReturn(rc, rc);
1199 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1200 return VINF_SUCCESS;
1201 }
1202# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1203
1204 RTGCPHYS GCPhys;
1205 uint64_t fFlags;
1206 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1207 if (RT_FAILURE(rc))
1208 {
1209 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1210 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1211 }
1212 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1213 {
1214 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1215 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1216 }
1217 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1218 {
1219 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1220 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1221 }
1222 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1223 /** @todo Check reserved bits and such stuff. PGM is better at doing
1224 * that, so do it when implementing the guest virtual address
1225 * TLB... */
1226
1227# ifdef IEM_VERIFICATION_MODE_FULL
1228 /*
1229 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1230 * instruction.
1231 */
1232 /** @todo optimize this differently by not using PGMPhysRead. */
1233 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1234 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1235 if ( offPrevOpcodes < cbOldOpcodes
1236 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1237 {
1238 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1239 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1240 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1241 pVCpu->iem.s.cbOpcode = cbNew;
1242 return VINF_SUCCESS;
1243 }
1244# endif
1245
1246 /*
1247 * Read the bytes at this address.
1248 */
1249 PVM pVM = pVCpu->CTX_SUFF(pVM);
1250# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1251 size_t cbActual;
1252 if ( PATMIsEnabled(pVM)
1253 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1254 {
1255 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1256 Assert(cbActual > 0);
1257 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1258 }
1259 else
1260# endif
1261 {
1262 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1263 if (cbToTryRead > cbLeftOnPage)
1264 cbToTryRead = cbLeftOnPage;
1265 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1266 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1267
1268 if (!pVCpu->iem.s.fBypassHandlers)
1269 {
1270 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1271 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1272 { /* likely */ }
1273 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1274 {
1275 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1276 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1277 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1278 }
1279 else
1280 {
1281 Log((RT_SUCCESS(rcStrict)
1282 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1283 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1284 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1285 return rcStrict;
1286 }
1287 }
1288 else
1289 {
1290 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1291 if (RT_SUCCESS(rc))
1292 { /* likely */ }
1293 else
1294 {
1295 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1296 GCPtrPC, GCPhys, cbToTryRead, rc));
1297 return rc;
1298 }
1299 }
1300 pVCpu->iem.s.cbOpcode = cbToTryRead;
1301 }
1302#endif /* !IEM_WITH_CODE_TLB */
1303 return VINF_SUCCESS;
1304}
1305
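/* Worked example for the non-TLB path above (illustrative numbers, added): with
 * cs.u64Base = 0 and eip = 0x401003, GCPtrPC is 0x401003; PGMGstGetPage() yields the
 * page frame, say 0x00012345000, and OR'ing in the page offset gives
 * GCPhys = 0x00012345003, from which at most sizeof(abOpcode) bytes, bounded by the
 * end of the page and the segment limit, are read. */
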
1306
1307/**
1308 * Invalidates the IEM TLBs.
1309 *
1310 * This is called internally as well as by PGM when moving GC mappings.
1311 *
1313 * @param pVCpu The cross context virtual CPU structure of the calling
1314 * thread.
1315 * @param fVmm Set when PGM calls us with a remapping.
1316 */
1317VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1318{
1319#ifdef IEM_WITH_CODE_TLB
1320 pVCpu->iem.s.cbInstrBufTotal = 0;
1321 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1322 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1323 { /* very likely */ }
1324 else
1325 {
1326 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1327 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1328 while (i-- > 0)
1329 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1330 }
1331#endif
1332
1333#ifdef IEM_WITH_DATA_TLB
1334 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1335 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1336 { /* very likely */ }
1337 else
1338 {
1339 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1340 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1341 while (i-- > 0)
1342 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1343 }
1344#endif
1345 NOREF(pVCpu); NOREF(fVmm);
1346}
1347
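/* Descriptive note (added): a TLB tag is formed as
 *      uTag = (GCPtrPage >> X86_PAGE_SHIFT) | uTlbRevision
 * so bumping the revision above invalidates all 256 entries lazily, without touching
 * them; only when the revision counter wraps to zero are the tags cleared explicitly. */
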
1348
1349/**
1350 * Invalidates a page in the TLBs.
1351 *
1352 * @param pVCpu The cross context virtual CPU structure of the calling
1353 * thread.
1354 * @param GCPtr The address of the page to invalidate
1355 */
1356VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1357{
1358#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1359 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1360 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1361 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1362 uintptr_t idx = (uint8_t)GCPtr;
1363
1364# ifdef IEM_WITH_CODE_TLB
1365 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1366 {
1367 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1368 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1369 pVCpu->iem.s.cbInstrBufTotal = 0;
1370 }
1371# endif
1372
1373# ifdef IEM_WITH_DATA_TLB
1374 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1375 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1376# endif
1377#else
1378 NOREF(pVCpu); NOREF(GCPtr);
1379#endif
1380}
1381
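/* Worked example (illustrative numbers, added): for GCPtr = 0x00007fff12345000 the
 * shifted value is 0x7fff12345, its low byte 0x45 picks the TLB entry, and that entry
 * only matches when its tag equals 0x7fff12345 OR'ed with the current TLB revision. */
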
1382
1383/**
1384 * Invalidates the host physical aspects of the IEM TLBs.
1385 *
1386 * This is called internally as well as by PGM when moving GC mappings.
1387 *
1388 * @param pVCpu The cross context virtual CPU structure of the calling
1389 * thread.
1390 */
1391VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1392{
1393#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1394 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1395
1396# ifdef IEM_WITH_CODE_TLB
1397 pVCpu->iem.s.cbInstrBufTotal = 0;
1398# endif
1399 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1400 if (uTlbPhysRev != 0)
1401 {
1402 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1403 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1404 }
1405 else
1406 {
1407 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1408 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1409
1410 unsigned i;
1411# ifdef IEM_WITH_CODE_TLB
1412 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1413 while (i-- > 0)
1414 {
1415 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1416 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1417 }
1418# endif
1419# ifdef IEM_WITH_DATA_TLB
1420 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1421 while (i-- > 0)
1422 {
1423 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1424 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1425 }
1426# endif
1427 }
1428#else
1429 NOREF(pVCpu);
1430#endif
1431}
1432
1433
1434/**
1435 * Invalidates the host physical aspects of the IEM TLBs.
1436 *
1437 * This is called internally as well as by PGM when moving GC mappings.
1438 *
1439 * @param pVM The cross context VM structure.
1440 *
1441 * @remarks Caller holds the PGM lock.
1442 */
1443VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1444{
1445
1446}
1447
1448#ifdef IEM_WITH_CODE_TLB
1449
1450/**
1451 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1452 * failure and jumping.
1453 *
1454 * We end up here for a number of reasons:
1455 * - pbInstrBuf isn't yet initialized.
1456 * - Advancing beyond the buffer boundary (e.g. cross page).
1457 * - Advancing beyond the CS segment limit.
1458 * - Fetching from non-mappable page (e.g. MMIO).
1459 *
1460 * @param pVCpu The cross context virtual CPU structure of the
1461 * calling thread.
1462 * @param pvDst Where to return the bytes.
1463 * @param cbDst Number of bytes to read.
1464 *
1465 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1466 */
1467IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1468{
1469#ifdef IN_RING3
1470//__debugbreak();
1471#else
1472 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1473#endif
1474 for (;;)
1475 {
1476 Assert(cbDst <= 8);
1477 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1478
1479 /*
1480 * We might have a partial buffer match, deal with that first to make the
1481 * rest simpler. This is the first part of the cross page/buffer case.
1482 */
1483 if (pVCpu->iem.s.pbInstrBuf != NULL)
1484 {
1485 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1486 {
1487 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1488 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1489 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1490
1491 cbDst -= cbCopy;
1492 pvDst = (uint8_t *)pvDst + cbCopy;
1493 offBuf += cbCopy;
1494 pVCpu->iem.s.offInstrNextByte += offBuf;
1495 }
1496 }
1497
1498 /*
1499 * Check segment limit, figuring how much we're allowed to access at this point.
1500 *
1501 * We will fault immediately if RIP is past the segment limit / in non-canonical
1502 * territory. If we do continue, there are one or more bytes to read before we
1503 * end up in trouble and we need to do that first before faulting.
1504 */
1505 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1506 RTGCPTR GCPtrFirst;
1507 uint32_t cbMaxRead;
1508 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1509 {
1510 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1511 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1512 { /* likely */ }
1513 else
1514 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1515 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1516 }
1517 else
1518 {
1519 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1520 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1521 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1522 { /* likely */ }
1523 else
1524 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1525 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1526 if (cbMaxRead != 0)
1527 { /* likely */ }
1528 else
1529 {
1530 /* Overflowed because address is 0 and limit is max. */
1531 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1532 cbMaxRead = X86_PAGE_SIZE;
1533 }
1534 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1535 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1536 if (cbMaxRead2 < cbMaxRead)
1537 cbMaxRead = cbMaxRead2;
1538 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1539 }
1540
1541 /*
1542 * Get the TLB entry for this piece of code.
1543 */
1544 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1545 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1546 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1547 if (pTlbe->uTag == uTag)
1548 {
1549 /* likely when executing lots of code, otherwise unlikely */
1550# ifdef VBOX_WITH_STATISTICS
1551 pVCpu->iem.s.CodeTlb.cTlbHits++;
1552# endif
1553 }
1554 else
1555 {
1556 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1557# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1558 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1559 {
1560 pTlbe->uTag = uTag;
1561 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1562                                      | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1563 pTlbe->GCPhys = NIL_RTGCPHYS;
1564 pTlbe->pbMappingR3 = NULL;
1565 }
1566 else
1567# endif
1568 {
1569 RTGCPHYS GCPhys;
1570 uint64_t fFlags;
1571 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1572 if (RT_FAILURE(rc))
1573 {
1574                    Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1575 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1576 }
1577
1578 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1579 pTlbe->uTag = uTag;
1580 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1581 pTlbe->GCPhys = GCPhys;
1582 pTlbe->pbMappingR3 = NULL;
1583 }
1584 }
1585
1586 /*
1587 * Check TLB page table level access flags.
1588 */
1589 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1590 {
1591 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1592 {
1593 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1594 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1595 }
1596 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1597 {
1598                Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1599 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1600 }
1601 }
1602
1603# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1604 /*
1605 * Allow interpretation of patch manager code blocks since they can for
1606 * instance throw #PFs for perfectly good reasons.
1607 */
1608 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1609        { /* likely */ }
1610 else
1611 {
1612            /** @todo Could optimize this a little in ring-3 if we liked. */
1613 size_t cbRead = 0;
1614 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1615 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1616 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1617 return;
1618 }
1619# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1620
1621 /*
1622 * Look up the physical page info if necessary.
1623 */
1624 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1625 { /* not necessary */ }
1626 else
1627 {
1628 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1629 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1630 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1631 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1632 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1633 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1634 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1635 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1636 }
1637
1638# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1639 /*
1640 * Try do a direct read using the pbMappingR3 pointer.
1641 */
1642 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1643 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1644 {
1645 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1646 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1647 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1648 {
1649 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1650 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1651 }
1652 else
1653 {
1654 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1655 Assert(cbInstr < cbMaxRead);
1656 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1657 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1658 }
1659 if (cbDst <= cbMaxRead)
1660 {
1661 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1662 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1663 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1664 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1665 return;
1666 }
1667 pVCpu->iem.s.pbInstrBuf = NULL;
1668
1669 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1670 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1671 }
1672 else
1673# endif
1674#if 0
1675 /*
1676     * If there is no special read handling, we can read a bit more and
1677 * put it in the prefetch buffer.
1678 */
1679 if ( cbDst < cbMaxRead
1680 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1681 {
1682 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1683 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1684 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1685 { /* likely */ }
1686 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1687 {
1688 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1689 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1690 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1691            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1692 }
1693 else
1694 {
1695 Log((RT_SUCCESS(rcStrict)
1696 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1697 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1698 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1699 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1700 }
1701 }
1702 /*
1703 * Special read handling, so only read exactly what's needed.
1704 * This is a highly unlikely scenario.
1705 */
1706 else
1707#endif
1708 {
1709 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1710 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1711 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1712 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1713 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1714 { /* likely */ }
1715 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1716 {
1717            Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1718                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1719 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1720 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1721 }
1722 else
1723 {
1724 Log((RT_SUCCESS(rcStrict)
1725                 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1726                 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1727                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1728 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1729 }
1730 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1731 if (cbToRead == cbDst)
1732 return;
1733 }
1734
1735 /*
1736 * More to read, loop.
1737 */
1738 cbDst -= cbMaxRead;
1739 pvDst = (uint8_t *)pvDst + cbMaxRead;
1740 }
1741}
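
/*
 * Worked example of the cross page handling above (illustrative numbers, not
 * taken from a trace): say a 4 byte immediate starts 3 bytes before a page
 * boundary.  The first pass computes cbMaxRead = 3 and copies those 3 bytes,
 * either through the direct pbMappingR3 path or the slow PGMPhysRead path.
 * Since cbDst > cbMaxRead, the loop at the bottom advances pvDst/cbDst and
 * goes around again; the second pass resolves the next page through the code
 * TLB and copies the remaining byte before returning.
 */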
1742
1743#else
1744
1745/**
1746 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1747 * exception if it fails.
1748 *
1749 * @returns Strict VBox status code.
1750 * @param pVCpu The cross context virtual CPU structure of the
1751 * calling thread.
1752 * @param   cbMin               The minimum number of bytes relative to offOpcode
1753 * that must be read.
1754 */
1755IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1756{
1757 /*
1758 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1759 *
1760 * First translate CS:rIP to a physical address.
1761 */
1762 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1763 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1764 uint32_t cbToTryRead;
1765 RTGCPTR GCPtrNext;
1766 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1767 {
1768 cbToTryRead = PAGE_SIZE;
1769 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1770 if (!IEM_IS_CANONICAL(GCPtrNext))
1771 return iemRaiseGeneralProtectionFault0(pVCpu);
1772 }
1773 else
1774 {
1775 uint32_t GCPtrNext32 = pCtx->eip;
1776 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1777 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1778 if (GCPtrNext32 > pCtx->cs.u32Limit)
1779 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1780 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1781 if (!cbToTryRead) /* overflowed */
1782 {
1783 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1784 cbToTryRead = UINT32_MAX;
1785 /** @todo check out wrapping around the code segment. */
1786 }
1787 if (cbToTryRead < cbMin - cbLeft)
1788 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1789 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1790 }
1791
1792 /* Only read up to the end of the page, and make sure we don't read more
1793 than the opcode buffer can hold. */
1794 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1795 if (cbToTryRead > cbLeftOnPage)
1796 cbToTryRead = cbLeftOnPage;
1797 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1798 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1799/** @todo r=bird: Convert assertion into undefined opcode exception? */
1800 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1801
1802# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1803 /* Allow interpretation of patch manager code blocks since they can for
1804 instance throw #PFs for perfectly good reasons. */
1805 if (pVCpu->iem.s.fInPatchCode)
1806 {
1807 size_t cbRead = 0;
1808 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1809 AssertRCReturn(rc, rc);
1810 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1811 return VINF_SUCCESS;
1812 }
1813# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1814
1815 RTGCPHYS GCPhys;
1816 uint64_t fFlags;
1817 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1818 if (RT_FAILURE(rc))
1819 {
1820 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1821 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1822 }
1823 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1824 {
1825 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1826 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1827 }
1828 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1829 {
1830 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1831 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1832 }
1833 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1834 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1835 /** @todo Check reserved bits and such stuff. PGM is better at doing
1836 * that, so do it when implementing the guest virtual address
1837 * TLB... */
1838
1839 /*
1840 * Read the bytes at this address.
1841 *
1842 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1843 * and since PATM should only patch the start of an instruction there
1844 * should be no need to check again here.
1845 */
1846 if (!pVCpu->iem.s.fBypassHandlers)
1847 {
1848 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1849 cbToTryRead, PGMACCESSORIGIN_IEM);
1850 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1851 { /* likely */ }
1852 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1853 {
1854 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1855                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1856 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1857 }
1858 else
1859 {
1860 Log((RT_SUCCESS(rcStrict)
1861 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1862 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1863                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1864 return rcStrict;
1865 }
1866 }
1867 else
1868 {
1869 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1870 if (RT_SUCCESS(rc))
1871 { /* likely */ }
1872 else
1873 {
1874 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1875 return rc;
1876 }
1877 }
1878 pVCpu->iem.s.cbOpcode += cbToTryRead;
1879 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1880
1881 return VINF_SUCCESS;
1882}
1883
1884#endif /* !IEM_WITH_CODE_TLB */
1885#ifndef IEM_WITH_SETJMP
1886
1887/**
1888 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1889 *
1890 * @returns Strict VBox status code.
1891 * @param pVCpu The cross context virtual CPU structure of the
1892 * calling thread.
1893 * @param pb Where to return the opcode byte.
1894 */
1895DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1896{
1897 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1898 if (rcStrict == VINF_SUCCESS)
1899 {
1900 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1901 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1902 pVCpu->iem.s.offOpcode = offOpcode + 1;
1903 }
1904 else
1905 *pb = 0;
1906 return rcStrict;
1907}
1908
1909
1910/**
1911 * Fetches the next opcode byte.
1912 *
1913 * @returns Strict VBox status code.
1914 * @param pVCpu The cross context virtual CPU structure of the
1915 * calling thread.
1916 * @param pu8 Where to return the opcode byte.
1917 */
1918DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1919{
1920 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1921 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1922 {
1923 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1924 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1925 return VINF_SUCCESS;
1926 }
1927 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1928}
1929
1930#else /* IEM_WITH_SETJMP */
1931
1932/**
1933 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1934 *
1935 * @returns The opcode byte.
1936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1937 */
1938DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1939{
1940# ifdef IEM_WITH_CODE_TLB
1941 uint8_t u8;
1942 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1943 return u8;
1944# else
1945 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1946 if (rcStrict == VINF_SUCCESS)
1947 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1948 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1949# endif
1950}
1951
1952
1953/**
1954 * Fetches the next opcode byte, longjmp on error.
1955 *
1956 * @returns The opcode byte.
1957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1958 */
1959DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1960{
1961# ifdef IEM_WITH_CODE_TLB
1962 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1963 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1964 if (RT_LIKELY( pbBuf != NULL
1965 && offBuf < pVCpu->iem.s.cbInstrBuf))
1966 {
1967 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1968 return pbBuf[offBuf];
1969 }
1970# else
1971 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1972 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1973 {
1974 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1975 return pVCpu->iem.s.abOpcode[offOpcode];
1976 }
1977# endif
1978 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1979}
1980
1981#endif /* IEM_WITH_SETJMP */
1982
1983/**
1984 * Fetches the next opcode byte, returns automatically on failure.
1985 *
1986 * @param a_pu8 Where to return the opcode byte.
1987 * @remark Implicitly references pVCpu.
1988 */
1989#ifndef IEM_WITH_SETJMP
1990# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1991 do \
1992 { \
1993 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
1994 if (rcStrict2 == VINF_SUCCESS) \
1995 { /* likely */ } \
1996 else \
1997 return rcStrict2; \
1998 } while (0)
1999#else
2000# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2001#endif /* IEM_WITH_SETJMP */
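
/*
 * Illustrative sketch only (kept disabled): how a decoder helper might consume
 * IEM_OPCODE_GET_NEXT_U8.  In the status code build the macro hides an early
 * 'return rcStrict2', so the caller must return VBOXSTRICTRC; in the setjmp
 * build it is a plain assignment and failures unwind via longjmp instead.  The
 * helper name below is made up for the example and not used anywhere.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleDecodeModRmByte(PVMCPU pVCpu)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns / longjmps on fetch failure */
    Log5(("iemExampleDecodeModRmByte: ModR/M=%#x\n", bRm));
    return VINF_SUCCESS;
}
#endif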
2002
2003
2004#ifndef IEM_WITH_SETJMP
2005/**
2006 * Fetches the next signed byte from the opcode stream.
2007 *
2008 * @returns Strict VBox status code.
2009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2010 * @param pi8 Where to return the signed byte.
2011 */
2012DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2013{
2014 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2015}
2016#endif /* !IEM_WITH_SETJMP */
2017
2018
2019/**
2020 * Fetches the next signed byte from the opcode stream, returning automatically
2021 * on failure.
2022 *
2023 * @param a_pi8 Where to return the signed byte.
2024 * @remark Implicitly references pVCpu.
2025 */
2026#ifndef IEM_WITH_SETJMP
2027# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2028 do \
2029 { \
2030 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2031 if (rcStrict2 != VINF_SUCCESS) \
2032 return rcStrict2; \
2033 } while (0)
2034#else /* IEM_WITH_SETJMP */
2035# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2036
2037#endif /* IEM_WITH_SETJMP */
2038
2039#ifndef IEM_WITH_SETJMP
2040
2041/**
2042 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2043 *
2044 * @returns Strict VBox status code.
2045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2046 * @param   pu16                Where to return the opcode word.
2047 */
2048DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2049{
2050 uint8_t u8;
2051 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2052 if (rcStrict == VINF_SUCCESS)
2053 *pu16 = (int8_t)u8;
2054 return rcStrict;
2055}
2056
2057
2058/**
2059 * Fetches the next signed byte from the opcode stream, extending it to
2060 * unsigned 16-bit.
2061 *
2062 * @returns Strict VBox status code.
2063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2064 * @param pu16 Where to return the unsigned word.
2065 */
2066DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2067{
2068 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2069 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2070 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2071
2072 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2073 pVCpu->iem.s.offOpcode = offOpcode + 1;
2074 return VINF_SUCCESS;
2075}
2076
2077#endif /* !IEM_WITH_SETJMP */
2078
2079/**
2080 * Fetches the next signed byte from the opcode stream, sign-extending it to
2081 * a word and returning automatically on failure.
2082 *
2083 * @param a_pu16 Where to return the word.
2084 * @remark Implicitly references pVCpu.
2085 */
2086#ifndef IEM_WITH_SETJMP
2087# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2088 do \
2089 { \
2090 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2091 if (rcStrict2 != VINF_SUCCESS) \
2092 return rcStrict2; \
2093 } while (0)
2094#else
2095# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2096#endif
2097
2098#ifndef IEM_WITH_SETJMP
2099
2100/**
2101 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2102 *
2103 * @returns Strict VBox status code.
2104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2105 * @param pu32 Where to return the opcode dword.
2106 */
2107DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2108{
2109 uint8_t u8;
2110 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2111 if (rcStrict == VINF_SUCCESS)
2112 *pu32 = (int8_t)u8;
2113 return rcStrict;
2114}
2115
2116
2117/**
2118 * Fetches the next signed byte from the opcode stream, extending it to
2119 * unsigned 32-bit.
2120 *
2121 * @returns Strict VBox status code.
2122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2123 * @param pu32 Where to return the unsigned dword.
2124 */
2125DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2126{
2127 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2128 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2129 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2130
2131 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2132 pVCpu->iem.s.offOpcode = offOpcode + 1;
2133 return VINF_SUCCESS;
2134}
2135
2136#endif /* !IEM_WITH_SETJMP */
2137
2138/**
2139 * Fetches the next signed byte from the opcode stream, sign-extending it to
2140 * a double word and returning automatically on failure.
2141 *
2142 * @param   a_pu32              Where to return the double word.
2143 * @remark Implicitly references pVCpu.
2144 */
2145#ifndef IEM_WITH_SETJMP
2146# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2147 do \
2148 { \
2149 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2150 if (rcStrict2 != VINF_SUCCESS) \
2151 return rcStrict2; \
2152 } while (0)
2153#else
2154# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2155#endif
2156
2157#ifndef IEM_WITH_SETJMP
2158
2159/**
2160 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2161 *
2162 * @returns Strict VBox status code.
2163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2164 * @param pu64 Where to return the opcode qword.
2165 */
2166DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2167{
2168 uint8_t u8;
2169 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2170 if (rcStrict == VINF_SUCCESS)
2171 *pu64 = (int8_t)u8;
2172 return rcStrict;
2173}
2174
2175
2176/**
2177 * Fetches the next signed byte from the opcode stream, extending it to
2178 * unsigned 64-bit.
2179 *
2180 * @returns Strict VBox status code.
2181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2182 * @param pu64 Where to return the unsigned qword.
2183 */
2184DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2185{
2186 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2187 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2188 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2189
2190 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2191 pVCpu->iem.s.offOpcode = offOpcode + 1;
2192 return VINF_SUCCESS;
2193}
2194
2195#endif /* !IEM_WITH_SETJMP */
2196
2197
2198/**
2199 * Fetches the next signed byte from the opcode stream, sign-extending it to
2200 * a quad word and returning automatically on failure.
2201 *
2202 * @param   a_pu64              Where to return the quad word.
2203 * @remark Implicitly references pVCpu.
2204 */
2205#ifndef IEM_WITH_SETJMP
2206# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2207 do \
2208 { \
2209 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2210 if (rcStrict2 != VINF_SUCCESS) \
2211 return rcStrict2; \
2212 } while (0)
2213#else
2214# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2215#endif
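
/*
 * Illustrative sketch only (kept disabled): why the S8_SX_* fetchers exist.
 * Immediates such as the imm8 of the 0x83 ALU group are sign-extended to the
 * operand size, so an encoded byte of 0xfe must be applied to a 64-bit operand
 * as 0xfffffffffffffffe, which is exactly what IEM_OPCODE_GET_NEXT_S8_SX_U64
 * yields.  The helper name below is made up for the example.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleFetchImm8SignExtended(PVMCPU pVCpu, uint64_t *puImm)
{
    IEM_OPCODE_GET_NEXT_S8_SX_U64(puImm); /* e.g. 0xfe -> 0xfffffffffffffffe */
    return VINF_SUCCESS;
}
#endif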
2216
2217
2218#ifndef IEM_WITH_SETJMP
2219
2220/**
2221 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2222 *
2223 * @returns Strict VBox status code.
2224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2225 * @param pu16 Where to return the opcode word.
2226 */
2227DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2228{
2229 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2230 if (rcStrict == VINF_SUCCESS)
2231 {
2232 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2233# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2234 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2235# else
2236 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2237# endif
2238 pVCpu->iem.s.offOpcode = offOpcode + 2;
2239 }
2240 else
2241 *pu16 = 0;
2242 return rcStrict;
2243}
2244
2245
2246/**
2247 * Fetches the next opcode word.
2248 *
2249 * @returns Strict VBox status code.
2250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2251 * @param pu16 Where to return the opcode word.
2252 */
2253DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2254{
2255 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2256 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2257 {
2258 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2259# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2260 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2261# else
2262 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2263# endif
2264 return VINF_SUCCESS;
2265 }
2266 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2267}
2268
2269#else /* IEM_WITH_SETJMP */
2270
2271/**
2272 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2273 *
2274 * @returns The opcode word.
2275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2276 */
2277DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2278{
2279# ifdef IEM_WITH_CODE_TLB
2280 uint16_t u16;
2281 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2282 return u16;
2283# else
2284 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2285 if (rcStrict == VINF_SUCCESS)
2286 {
2287 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2288 pVCpu->iem.s.offOpcode += 2;
2289# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2290 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2291# else
2292 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2293# endif
2294 }
2295 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2296# endif
2297}
2298
2299
2300/**
2301 * Fetches the next opcode word, longjmp on error.
2302 *
2303 * @returns The opcode word.
2304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2305 */
2306DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2307{
2308# ifdef IEM_WITH_CODE_TLB
2309 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2310 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2311 if (RT_LIKELY( pbBuf != NULL
2312 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2313 {
2314 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2315# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2316 return *(uint16_t const *)&pbBuf[offBuf];
2317# else
2318 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2319# endif
2320 }
2321# else
2322 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2323 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2324 {
2325 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2326# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2327 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2328# else
2329 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2330# endif
2331 }
2332# endif
2333 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2334}
2335
2336#endif /* IEM_WITH_SETJMP */
2337
2338
2339/**
2340 * Fetches the next opcode word, returns automatically on failure.
2341 *
2342 * @param a_pu16 Where to return the opcode word.
2343 * @remark Implicitly references pVCpu.
2344 */
2345#ifndef IEM_WITH_SETJMP
2346# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2347 do \
2348 { \
2349 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2350 if (rcStrict2 != VINF_SUCCESS) \
2351 return rcStrict2; \
2352 } while (0)
2353#else
2354# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2355#endif
2356
2357#ifndef IEM_WITH_SETJMP
2358
2359/**
2360 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2361 *
2362 * @returns Strict VBox status code.
2363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2364 * @param pu32 Where to return the opcode double word.
2365 */
2366DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2367{
2368 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2369 if (rcStrict == VINF_SUCCESS)
2370 {
2371 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2372 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2373 pVCpu->iem.s.offOpcode = offOpcode + 2;
2374 }
2375 else
2376 *pu32 = 0;
2377 return rcStrict;
2378}
2379
2380
2381/**
2382 * Fetches the next opcode word, zero extending it to a double word.
2383 *
2384 * @returns Strict VBox status code.
2385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2386 * @param pu32 Where to return the opcode double word.
2387 */
2388DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2389{
2390 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2391 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2392 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2393
2394 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2395 pVCpu->iem.s.offOpcode = offOpcode + 2;
2396 return VINF_SUCCESS;
2397}
2398
2399#endif /* !IEM_WITH_SETJMP */
2400
2401
2402/**
2403 * Fetches the next opcode word and zero extends it to a double word, returns
2404 * automatically on failure.
2405 *
2406 * @param a_pu32 Where to return the opcode double word.
2407 * @remark Implicitly references pVCpu.
2408 */
2409#ifndef IEM_WITH_SETJMP
2410# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2411 do \
2412 { \
2413 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2414 if (rcStrict2 != VINF_SUCCESS) \
2415 return rcStrict2; \
2416 } while (0)
2417#else
2418# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2419#endif
2420
2421#ifndef IEM_WITH_SETJMP
2422
2423/**
2424 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2425 *
2426 * @returns Strict VBox status code.
2427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2428 * @param pu64 Where to return the opcode quad word.
2429 */
2430DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2431{
2432 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2433 if (rcStrict == VINF_SUCCESS)
2434 {
2435 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2436 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2437 pVCpu->iem.s.offOpcode = offOpcode + 2;
2438 }
2439 else
2440 *pu64 = 0;
2441 return rcStrict;
2442}
2443
2444
2445/**
2446 * Fetches the next opcode word, zero extending it to a quad word.
2447 *
2448 * @returns Strict VBox status code.
2449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2450 * @param pu64 Where to return the opcode quad word.
2451 */
2452DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2453{
2454 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2455 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2456 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2457
2458 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2459 pVCpu->iem.s.offOpcode = offOpcode + 2;
2460 return VINF_SUCCESS;
2461}
2462
2463#endif /* !IEM_WITH_SETJMP */
2464
2465/**
2466 * Fetches the next opcode word and zero extends it to a quad word, returns
2467 * automatically on failure.
2468 *
2469 * @param a_pu64 Where to return the opcode quad word.
2470 * @remark Implicitly references pVCpu.
2471 */
2472#ifndef IEM_WITH_SETJMP
2473# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2474 do \
2475 { \
2476 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2477 if (rcStrict2 != VINF_SUCCESS) \
2478 return rcStrict2; \
2479 } while (0)
2480#else
2481# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2482#endif
2483
2484
2485#ifndef IEM_WITH_SETJMP
2486/**
2487 * Fetches the next signed word from the opcode stream.
2488 *
2489 * @returns Strict VBox status code.
2490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2491 * @param pi16 Where to return the signed word.
2492 */
2493DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2494{
2495 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2496}
2497#endif /* !IEM_WITH_SETJMP */
2498
2499
2500/**
2501 * Fetches the next signed word from the opcode stream, returning automatically
2502 * on failure.
2503 *
2504 * @param a_pi16 Where to return the signed word.
2505 * @remark Implicitly references pVCpu.
2506 */
2507#ifndef IEM_WITH_SETJMP
2508# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2509 do \
2510 { \
2511 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2512 if (rcStrict2 != VINF_SUCCESS) \
2513 return rcStrict2; \
2514 } while (0)
2515#else
2516# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2517#endif
2518
2519#ifndef IEM_WITH_SETJMP
2520
2521/**
2522 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2523 *
2524 * @returns Strict VBox status code.
2525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2526 * @param pu32 Where to return the opcode dword.
2527 */
2528DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2529{
2530 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2531 if (rcStrict == VINF_SUCCESS)
2532 {
2533 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2534# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2535 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2536# else
2537 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2538 pVCpu->iem.s.abOpcode[offOpcode + 1],
2539 pVCpu->iem.s.abOpcode[offOpcode + 2],
2540 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2541# endif
2542 pVCpu->iem.s.offOpcode = offOpcode + 4;
2543 }
2544 else
2545 *pu32 = 0;
2546 return rcStrict;
2547}
2548
2549
2550/**
2551 * Fetches the next opcode dword.
2552 *
2553 * @returns Strict VBox status code.
2554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2555 * @param pu32 Where to return the opcode double word.
2556 */
2557DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2558{
2559 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2560 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2561 {
2562 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2563# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2564 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2565# else
2566 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2567 pVCpu->iem.s.abOpcode[offOpcode + 1],
2568 pVCpu->iem.s.abOpcode[offOpcode + 2],
2569 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2570# endif
2571 return VINF_SUCCESS;
2572 }
2573 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2574}
2575
2576#else /* IEM_WITH_SETJMP */
2577
2578/**
2579 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2580 *
2581 * @returns The opcode dword.
2582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2583 */
2584DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2585{
2586# ifdef IEM_WITH_CODE_TLB
2587 uint32_t u32;
2588 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2589 return u32;
2590# else
2591 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2592 if (rcStrict == VINF_SUCCESS)
2593 {
2594 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2595 pVCpu->iem.s.offOpcode = offOpcode + 4;
2596# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2597 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2598# else
2599 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2600 pVCpu->iem.s.abOpcode[offOpcode + 1],
2601 pVCpu->iem.s.abOpcode[offOpcode + 2],
2602 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2603# endif
2604 }
2605 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2606# endif
2607}
2608
2609
2610/**
2611 * Fetches the next opcode dword, longjmp on error.
2612 *
2613 * @returns The opcode dword.
2614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2615 */
2616DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2617{
2618# ifdef IEM_WITH_CODE_TLB
2619 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2620 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2621 if (RT_LIKELY( pbBuf != NULL
2622 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2623 {
2624 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2625# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2626 return *(uint32_t const *)&pbBuf[offBuf];
2627# else
2628 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2629 pbBuf[offBuf + 1],
2630 pbBuf[offBuf + 2],
2631 pbBuf[offBuf + 3]);
2632# endif
2633 }
2634# else
2635 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2636 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2637 {
2638 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2639# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2640 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2641# else
2642 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2643 pVCpu->iem.s.abOpcode[offOpcode + 1],
2644 pVCpu->iem.s.abOpcode[offOpcode + 2],
2645 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2646# endif
2647 }
2648# endif
2649 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2650}
2651
2652#endif /* IEM_WITH_SETJMP */
2653
2654
2655/**
2656 * Fetches the next opcode dword, returns automatically on failure.
2657 *
2658 * @param a_pu32 Where to return the opcode dword.
2659 * @remark Implicitly references pVCpu.
2660 */
2661#ifndef IEM_WITH_SETJMP
2662# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2663 do \
2664 { \
2665 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2666 if (rcStrict2 != VINF_SUCCESS) \
2667 return rcStrict2; \
2668 } while (0)
2669#else
2670# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2671#endif
2672
2673#ifndef IEM_WITH_SETJMP
2674
2675/**
2676 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2677 *
2678 * @returns Strict VBox status code.
2679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2680 * @param   pu64                Where to return the opcode quad word.
2681 */
2682DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2683{
2684 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2685 if (rcStrict == VINF_SUCCESS)
2686 {
2687 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2688 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2689 pVCpu->iem.s.abOpcode[offOpcode + 1],
2690 pVCpu->iem.s.abOpcode[offOpcode + 2],
2691 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2692 pVCpu->iem.s.offOpcode = offOpcode + 4;
2693 }
2694 else
2695 *pu64 = 0;
2696 return rcStrict;
2697}
2698
2699
2700/**
2701 * Fetches the next opcode dword, zero extending it to a quad word.
2702 *
2703 * @returns Strict VBox status code.
2704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2705 * @param pu64 Where to return the opcode quad word.
2706 */
2707DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2708{
2709 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2710 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2711 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2712
2713 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2714 pVCpu->iem.s.abOpcode[offOpcode + 1],
2715 pVCpu->iem.s.abOpcode[offOpcode + 2],
2716 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2717 pVCpu->iem.s.offOpcode = offOpcode + 4;
2718 return VINF_SUCCESS;
2719}
2720
2721#endif /* !IEM_WITH_SETJMP */
2722
2723
2724/**
2725 * Fetches the next opcode dword and zero extends it to a quad word, returns
2726 * automatically on failure.
2727 *
2728 * @param a_pu64 Where to return the opcode quad word.
2729 * @remark Implicitly references pVCpu.
2730 */
2731#ifndef IEM_WITH_SETJMP
2732# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2733 do \
2734 { \
2735 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2736 if (rcStrict2 != VINF_SUCCESS) \
2737 return rcStrict2; \
2738 } while (0)
2739#else
2740# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2741#endif
2742
2743
2744#ifndef IEM_WITH_SETJMP
2745/**
2746 * Fetches the next signed double word from the opcode stream.
2747 *
2748 * @returns Strict VBox status code.
2749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2750 * @param pi32 Where to return the signed double word.
2751 */
2752DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2753{
2754 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2755}
2756#endif
2757
2758/**
2759 * Fetches the next signed double word from the opcode stream, returning
2760 * automatically on failure.
2761 *
2762 * @param a_pi32 Where to return the signed double word.
2763 * @remark Implicitly references pVCpu.
2764 */
2765#ifndef IEM_WITH_SETJMP
2766# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2767 do \
2768 { \
2769 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2770 if (rcStrict2 != VINF_SUCCESS) \
2771 return rcStrict2; \
2772 } while (0)
2773#else
2774# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2775#endif
2776
2777#ifndef IEM_WITH_SETJMP
2778
2779/**
2780 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2781 *
2782 * @returns Strict VBox status code.
2783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2784 * @param pu64 Where to return the opcode qword.
2785 */
2786DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2787{
2788 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2789 if (rcStrict == VINF_SUCCESS)
2790 {
2791 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2792 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2793 pVCpu->iem.s.abOpcode[offOpcode + 1],
2794 pVCpu->iem.s.abOpcode[offOpcode + 2],
2795 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2796 pVCpu->iem.s.offOpcode = offOpcode + 4;
2797 }
2798 else
2799 *pu64 = 0;
2800 return rcStrict;
2801}
2802
2803
2804/**
2805 * Fetches the next opcode dword, sign extending it into a quad word.
2806 *
2807 * @returns Strict VBox status code.
2808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2809 * @param pu64 Where to return the opcode quad word.
2810 */
2811DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2812{
2813 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2814 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2815 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2816
2817 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2818 pVCpu->iem.s.abOpcode[offOpcode + 1],
2819 pVCpu->iem.s.abOpcode[offOpcode + 2],
2820 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2821 *pu64 = i32;
2822 pVCpu->iem.s.offOpcode = offOpcode + 4;
2823 return VINF_SUCCESS;
2824}
2825
2826#endif /* !IEM_WITH_SETJMP */
2827
2828
2829/**
2830 * Fetches the next opcode double word and sign extends it to a quad word,
2831 * returns automatically on failure.
2832 *
2833 * @param a_pu64 Where to return the opcode quad word.
2834 * @remark Implicitly references pVCpu.
2835 */
2836#ifndef IEM_WITH_SETJMP
2837# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2838 do \
2839 { \
2840 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2841 if (rcStrict2 != VINF_SUCCESS) \
2842 return rcStrict2; \
2843 } while (0)
2844#else
2845# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2846#endif
2847
2848#ifndef IEM_WITH_SETJMP
2849
2850/**
2851 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2852 *
2853 * @returns Strict VBox status code.
2854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2855 * @param pu64 Where to return the opcode qword.
2856 */
2857DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2858{
2859 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2860 if (rcStrict == VINF_SUCCESS)
2861 {
2862 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2863# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2864 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2865# else
2866 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2867 pVCpu->iem.s.abOpcode[offOpcode + 1],
2868 pVCpu->iem.s.abOpcode[offOpcode + 2],
2869 pVCpu->iem.s.abOpcode[offOpcode + 3],
2870 pVCpu->iem.s.abOpcode[offOpcode + 4],
2871 pVCpu->iem.s.abOpcode[offOpcode + 5],
2872 pVCpu->iem.s.abOpcode[offOpcode + 6],
2873 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2874# endif
2875 pVCpu->iem.s.offOpcode = offOpcode + 8;
2876 }
2877 else
2878 *pu64 = 0;
2879 return rcStrict;
2880}
2881
2882
2883/**
2884 * Fetches the next opcode qword.
2885 *
2886 * @returns Strict VBox status code.
2887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2888 * @param pu64 Where to return the opcode qword.
2889 */
2890DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2891{
2892 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2893 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2894 {
2895# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2896 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2897# else
2898 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2899 pVCpu->iem.s.abOpcode[offOpcode + 1],
2900 pVCpu->iem.s.abOpcode[offOpcode + 2],
2901 pVCpu->iem.s.abOpcode[offOpcode + 3],
2902 pVCpu->iem.s.abOpcode[offOpcode + 4],
2903 pVCpu->iem.s.abOpcode[offOpcode + 5],
2904 pVCpu->iem.s.abOpcode[offOpcode + 6],
2905 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2906# endif
2907 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2908 return VINF_SUCCESS;
2909 }
2910 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2911}
2912
2913#else /* IEM_WITH_SETJMP */
2914
2915/**
2916 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2917 *
2918 * @returns The opcode qword.
2919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2920 */
2921DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2922{
2923# ifdef IEM_WITH_CODE_TLB
2924 uint64_t u64;
2925 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2926 return u64;
2927# else
2928 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2929 if (rcStrict == VINF_SUCCESS)
2930 {
2931 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2932 pVCpu->iem.s.offOpcode = offOpcode + 8;
2933# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2934 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2935# else
2936 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2937 pVCpu->iem.s.abOpcode[offOpcode + 1],
2938 pVCpu->iem.s.abOpcode[offOpcode + 2],
2939 pVCpu->iem.s.abOpcode[offOpcode + 3],
2940 pVCpu->iem.s.abOpcode[offOpcode + 4],
2941 pVCpu->iem.s.abOpcode[offOpcode + 5],
2942 pVCpu->iem.s.abOpcode[offOpcode + 6],
2943 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2944# endif
2945 }
2946 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2947# endif
2948}
2949
2950
2951/**
2952 * Fetches the next opcode qword, longjmp on error.
2953 *
2954 * @returns The opcode qword.
2955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2956 */
2957DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2958{
2959# ifdef IEM_WITH_CODE_TLB
2960 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2961 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2962 if (RT_LIKELY( pbBuf != NULL
2963 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2964 {
2965 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2966# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2967 return *(uint64_t const *)&pbBuf[offBuf];
2968# else
2969 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2970 pbBuf[offBuf + 1],
2971 pbBuf[offBuf + 2],
2972 pbBuf[offBuf + 3],
2973 pbBuf[offBuf + 4],
2974 pbBuf[offBuf + 5],
2975 pbBuf[offBuf + 6],
2976 pbBuf[offBuf + 7]);
2977# endif
2978 }
2979# else
2980 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2981 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2982 {
2983 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2984# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2985 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2986# else
2987 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2988 pVCpu->iem.s.abOpcode[offOpcode + 1],
2989 pVCpu->iem.s.abOpcode[offOpcode + 2],
2990 pVCpu->iem.s.abOpcode[offOpcode + 3],
2991 pVCpu->iem.s.abOpcode[offOpcode + 4],
2992 pVCpu->iem.s.abOpcode[offOpcode + 5],
2993 pVCpu->iem.s.abOpcode[offOpcode + 6],
2994 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2995# endif
2996 }
2997# endif
2998 return iemOpcodeGetNextU64SlowJmp(pVCpu);
2999}
3000
3001#endif /* IEM_WITH_SETJMP */
3002
3003/**
3004 * Fetches the next opcode quad word, returns automatically on failure.
3005 *
3006 * @param a_pu64 Where to return the opcode quad word.
3007 * @remark Implicitly references pVCpu.
3008 */
3009#ifndef IEM_WITH_SETJMP
3010# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3011 do \
3012 { \
3013 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3014 if (rcStrict2 != VINF_SUCCESS) \
3015 return rcStrict2; \
3016 } while (0)
3017#else
3018# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3019#endif
3020
3021
3022/** @name Misc Worker Functions.
3023 * @{
3024 */
3025
3026
3027/**
3028 * Validates a new SS segment.
3029 *
3030 * @returns VBox strict status code.
3031 * @param pVCpu The cross context virtual CPU structure of the
3032 * calling thread.
3033 * @param pCtx The CPU context.
3034 * @param   NewSS               The new SS selector.
3035 * @param uCpl The CPL to load the stack for.
3036 * @param pDesc Where to return the descriptor.
3037 */
3038IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3039{
3040 NOREF(pCtx);
3041
3042 /* Null selectors are not allowed (we're not called for dispatching
3043 interrupts with SS=0 in long mode). */
3044 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3045 {
3046 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3047 return iemRaiseTaskSwitchFault0(pVCpu);
3048 }
3049
3050 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3051 if ((NewSS & X86_SEL_RPL) != uCpl)
3052 {
3053 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3054 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3055 }
3056
3057 /*
3058 * Read the descriptor.
3059 */
3060 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3061 if (rcStrict != VINF_SUCCESS)
3062 return rcStrict;
3063
3064 /*
3065 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3066 */
3067 if (!pDesc->Legacy.Gen.u1DescType)
3068 {
3069 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3070 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3071 }
3072
3073 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3074 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3075 {
3076 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3077 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3078 }
3079 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3080 {
3081 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3082 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3083 }
3084
3085 /* Is it there? */
3086 /** @todo testcase: Is this checked before the canonical / limit check below? */
3087 if (!pDesc->Legacy.Gen.u1Present)
3088 {
3089 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3090 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3091 }
3092
3093 return VINF_SUCCESS;
3094}
3095
3096
3097/**
3098 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3099 * not.
3100 *
3101 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3102 * @param a_pCtx The CPU context.
3103 */
3104#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3105# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3106 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3107 ? (a_pCtx)->eflags.u \
3108 : CPUMRawGetEFlags(a_pVCpu) )
3109#else
3110# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3111 ( (a_pCtx)->eflags.u )
3112#endif
3113
3114/**
3115 * Updates the EFLAGS in the correct manner wrt. PATM.
3116 *
3117 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3118 * @param a_pCtx The CPU context.
3119 * @param a_fEfl The new EFLAGS.
3120 */
3121#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3122# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3123 do { \
3124 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3125 (a_pCtx)->eflags.u = (a_fEfl); \
3126 else \
3127 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3128 } while (0)
3129#else
3130# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3131 do { \
3132 (a_pCtx)->eflags.u = (a_fEfl); \
3133 } while (0)
3134#endif
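
/*
 * Illustrative sketch only (kept disabled): the GET/SET pair above is intended
 * to bracket a read-modify-write of EFLAGS so that raw-mode (PATM) always sees
 * a consistent value.  The helper name and the choice of flag are made up for
 * the example.
 */
#if 0
DECLINLINE(void) iemExampleClearTrapFlag(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
    fEfl &= ~X86_EFL_TF;                    /* modify... */
    IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);     /* ...and write the whole register back. */
}
#endif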
3135
3136
3137/** @} */
3138
3139/** @name Raising Exceptions.
3140 *
3141 * @{
3142 */
3143
3144/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3145 * @{ */
3146/** CPU exception. */
3147#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3148/** External interrupt (from PIC, APIC, whatever). */
3149#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3150/** Software interrupt (int or into, not bound).
3151 * Returns to the following instruction */
3152#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3153/** Takes an error code. */
3154#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3155/** Takes a CR2. */
3156#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3157/** Generated by the breakpoint instruction. */
3158#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3159/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3160#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3161/** @} */
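
/*
 * Example combinations (illustrative; the iemRaiseXxx helpers in the rest of
 * the file are authoritative): a CPU generated #PF would typically be raised
 * with IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
 * while an INT3 would use IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR.
 */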
3162
3163
3164/**
3165 * Loads the specified stack far pointer from the TSS.
3166 *
3167 * @returns VBox strict status code.
3168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3169 * @param pCtx The CPU context.
3170 * @param uCpl The CPL to load the stack for.
3171 * @param pSelSS Where to return the new stack segment.
3172 * @param puEsp Where to return the new stack pointer.
3173 */
3174IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3175 PRTSEL pSelSS, uint32_t *puEsp)
3176{
3177 VBOXSTRICTRC rcStrict;
3178 Assert(uCpl < 4);
3179
3180 switch (pCtx->tr.Attr.n.u4Type)
3181 {
3182 /*
3183 * 16-bit TSS (X86TSS16).
3184 */
3185 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
3186 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3187 {
3188 uint32_t off = uCpl * 4 + 2;
3189 if (off + 4 <= pCtx->tr.u32Limit)
3190 {
3191 /** @todo check actual access pattern here. */
3192 uint32_t u32Tmp = 0; /* gcc maybe... */
3193 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3194 if (rcStrict == VINF_SUCCESS)
3195 {
3196 *puEsp = RT_LOWORD(u32Tmp);
3197 *pSelSS = RT_HIWORD(u32Tmp);
3198 return VINF_SUCCESS;
3199 }
3200 }
3201 else
3202 {
3203 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3204 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3205 }
3206 break;
3207 }
3208
3209 /*
3210 * 32-bit TSS (X86TSS32).
3211 */
3212 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
3213 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3214 {
3215 uint32_t off = uCpl * 8 + 4;
3216 if (off + 7 <= pCtx->tr.u32Limit)
3217 {
3218/** @todo check actual access pattern here. */
3219 uint64_t u64Tmp;
3220 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3221 if (rcStrict == VINF_SUCCESS)
3222 {
3223 *puEsp = u64Tmp & UINT32_MAX;
3224 *pSelSS = (RTSEL)(u64Tmp >> 32);
3225 return VINF_SUCCESS;
3226 }
3227 }
3228 else
3229 {
3230 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3231 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3232 }
3233 break;
3234 }
3235
3236 default:
3237 AssertFailed();
3238 rcStrict = VERR_IEM_IPE_4;
3239 break;
3240 }
3241
3242 *puEsp = 0; /* make gcc happy */
3243 *pSelSS = 0; /* make gcc happy */
3244 return rcStrict;
3245}
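/*
 * Illustrative sketch of the stack-slot arithmetic used above (the helper
 * name is hypothetical, not part of the code): a 16-bit TSS stores 2+2 byte
 * SP:SS pairs starting at offset 2, a 32-bit TSS stores 4+4 byte ESP:SS
 * pairs starting at offset 4.
 *
 *      #include <stdint.h>
 *      static uint32_t tssStackSlotOffset(uint8_t uCpl, int fIs386Tss)
 *      {
 *          return fIs386Tss ? uCpl * 8u + 4u    // X86TSS32: esp0 at 4, ss0 at 8, ...
 *                           : uCpl * 4u + 2u;   // X86TSS16: sp0 at 2, ss0 at 4, ...
 *      }
 */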
3246
3247
3248/**
3249 * Loads the specified stack pointer from the 64-bit TSS.
3250 *
3251 * @returns VBox strict status code.
3252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3253 * @param pCtx The CPU context.
3254 * @param uCpl The CPL to load the stack for.
3255 * @param uIst The interrupt stack table index, or zero to use the uCpl-based slot.
3256 * @param puRsp Where to return the new stack pointer.
3257 */
3258IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3259{
3260 Assert(uCpl < 4);
3261 Assert(uIst < 8);
3262 *puRsp = 0; /* make gcc happy */
3263
3264 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3265
3266 uint32_t off;
3267 if (uIst)
3268 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3269 else
3270 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3271 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3272 {
3273 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3274 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3275 }
3276
3277 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3278}
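/*
 * Illustrative sketch of the offset selection above (hypothetical helper;
 * offsets per the architectural 64-bit TSS layout, RSP0 at byte 4 and IST1
 * at byte 36): a non-zero IST index overrides the CPL-based RSPn slot.
 *
 *      #include <stdint.h>
 *      static uint32_t tss64StackSlotOffset(uint8_t uCpl, uint8_t uIst)
 *      {
 *          return uIst ? (uint32_t)(uIst - 1) * 8u + 36u   // IST1..IST7
 *                      : (uint32_t)uCpl       * 8u +  4u;  // RSP0..RSP2
 *      }
 */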
3279
3280
3281/**
3282 * Adjust the CPU state according to the exception being raised.
3283 *
3284 * @param pCtx The CPU context.
3285 * @param u8Vector The exception that has been raised.
3286 */
3287DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3288{
3289 switch (u8Vector)
3290 {
3291 case X86_XCPT_DB:
3292 pCtx->dr[7] &= ~X86_DR7_GD;
3293 break;
3294 /** @todo Read the AMD and Intel exception reference... */
3295 }
3296}
3297
3298
3299/**
3300 * Implements exceptions and interrupts for real mode.
3301 *
3302 * @returns VBox strict status code.
3303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3304 * @param pCtx The CPU context.
3305 * @param cbInstr The number of bytes to offset rIP by in the return
3306 * address.
3307 * @param u8Vector The interrupt / exception vector number.
3308 * @param fFlags The flags.
3309 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3310 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3311 */
3312IEM_STATIC VBOXSTRICTRC
3313iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3314 PCPUMCTX pCtx,
3315 uint8_t cbInstr,
3316 uint8_t u8Vector,
3317 uint32_t fFlags,
3318 uint16_t uErr,
3319 uint64_t uCr2)
3320{
3321 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3322 NOREF(uErr); NOREF(uCr2);
3323
3324 /*
3325 * Read the IDT entry.
3326 */
3327 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3328 {
3329 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3330 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3331 }
3332 RTFAR16 Idte;
3333 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3334 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3335 return rcStrict;
3336
3337 /*
3338 * Push the stack frame.
3339 */
3340 uint16_t *pu16Frame;
3341 uint64_t uNewRsp;
3342 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3343 if (rcStrict != VINF_SUCCESS)
3344 return rcStrict;
3345
3346 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3347#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3348 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3349 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3350 fEfl |= UINT16_C(0xf000);
3351#endif
3352 pu16Frame[2] = (uint16_t)fEfl;
3353 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3354 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3355 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3356 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3357 return rcStrict;
3358
3359 /*
3360 * Load the vector address into cs:ip and make exception specific state
3361 * adjustments.
3362 */
3363 pCtx->cs.Sel = Idte.sel;
3364 pCtx->cs.ValidSel = Idte.sel;
3365 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3366 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3367 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3368 pCtx->rip = Idte.off;
3369 fEfl &= ~X86_EFL_IF;
3370 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3371
3372 /** @todo do we actually do this in real mode? */
3373 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3374 iemRaiseXcptAdjustState(pCtx, u8Vector);
3375
3376 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3377}
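/*
 * Illustrative sketch of the real-mode lookup performed above (hypothetical
 * helper): each IVT entry is 4 bytes at IDTR.base + vector * 4, with the
 * offset in the low word and the segment in the high word, and delivery
 * pushes FLAGS, CS and the return IP as three 16-bit words.
 *
 *      #include <stdint.h>
 *      static uint32_t ivtEntryAddr(uint32_t uIdtBase, uint8_t bVector)
 *      {
 *          return uIdtBase + (uint32_t)bVector * 4u;
 *      }
 */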
3378
3379
3380/**
3381 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3382 *
3383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3384 * @param pSReg Pointer to the segment register.
3385 */
3386IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3387{
3388 pSReg->Sel = 0;
3389 pSReg->ValidSel = 0;
3390 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3391 {
3392 /* VT-x (Intel 3960x) doesn't change the base and limit; it keeps only the following attributes and sets the unusable bit. */
3393 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3394 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3395 }
3396 else
3397 {
3398 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3399 /** @todo check this on AMD-V */
3400 pSReg->u64Base = 0;
3401 pSReg->u32Limit = 0;
3402 }
3403}
3404
3405
3406/**
3407 * Loads a segment selector during a task switch in V8086 mode.
3408 *
3409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3410 * @param pSReg Pointer to the segment register.
3411 * @param uSel The selector value to load.
3412 */
3413IEM_STATIC void iemHlpLoadSelectorInV86Mode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3414{
3415 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3416 pSReg->Sel = uSel;
3417 pSReg->ValidSel = uSel;
3418 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3419 pSReg->u64Base = uSel << 4;
3420 pSReg->u32Limit = 0xffff;
3421 pSReg->Attr.u = 0xf3;
3422}
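/*
 * Illustrative sketch of the hidden-part values established above
 * (hypothetical helper): in virtual-8086 mode a selector behaves like a
 * real-mode segment, so the base is the selector shifted left by four, the
 * limit is 0xffff and the attributes (0xf3) describe a present, DPL=3,
 * accessed read/write data segment.
 *
 *      #include <stdint.h>
 *      static uint64_t v86SegBase(uint16_t uSel)
 *      {
 *          return (uint64_t)uSel << 4;     // e.g. 0xb800 -> 0xb8000
 *      }
 */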
3423
3424
3425/**
3426 * Loads a NULL data selector into a selector register, both the hidden and
3427 * visible parts, in protected mode.
3428 *
3429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3430 * @param pSReg Pointer to the segment register.
3431 * @param uRpl The RPL.
3432 */
3433IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3434{
3435 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3436 * data selector in protected mode. */
3437 pSReg->Sel = uRpl;
3438 pSReg->ValidSel = uRpl;
3439 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3440 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3441 {
3442 /* VT-x (Intel 3960x) observed doing something like this. */
3443 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3444 pSReg->u32Limit = UINT32_MAX;
3445 pSReg->u64Base = 0;
3446 }
3447 else
3448 {
3449 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3450 pSReg->u32Limit = 0;
3451 pSReg->u64Base = 0;
3452 }
3453}
3454
3455
3456/**
3457 * Loads a segment selector during a task switch in protected mode.
3458 *
3459 * In this task switch scenario, we would throw \#TS exceptions rather than
3460 * \#GPs.
3461 *
3462 * @returns VBox strict status code.
3463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3464 * @param pSReg Pointer to the segment register.
3465 * @param uSel The new selector value.
3466 *
3467 * @remarks This does _not_ handle CS or SS.
3468 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3469 */
3470IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3471{
3472 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3473
3474 /* Null data selector. */
3475 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3476 {
3477 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3478 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3479 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3480 return VINF_SUCCESS;
3481 }
3482
3483 /* Fetch the descriptor. */
3484 IEMSELDESC Desc;
3485 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3486 if (rcStrict != VINF_SUCCESS)
3487 {
3488 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3489 VBOXSTRICTRC_VAL(rcStrict)));
3490 return rcStrict;
3491 }
3492
3493 /* Must be a data segment or readable code segment. */
3494 if ( !Desc.Legacy.Gen.u1DescType
3495 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3496 {
3497 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3498 Desc.Legacy.Gen.u4Type));
3499 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3500 }
3501
3502 /* Check privileges for data segments and non-conforming code segments. */
3503 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3504 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3505 {
3506 /* The RPL and the new CPL must be less than or equal to the DPL. */
3507 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3508 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3509 {
3510 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3511 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3512 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3513 }
3514 }
3515
3516 /* Is it there? */
3517 if (!Desc.Legacy.Gen.u1Present)
3518 {
3519 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3520 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3521 }
3522
3523 /* The base and limit. */
3524 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3525 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3526
3527 /*
3528 * Ok, everything checked out fine. Now set the accessed bit before
3529 * committing the result into the registers.
3530 */
3531 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3532 {
3533 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3534 if (rcStrict != VINF_SUCCESS)
3535 return rcStrict;
3536 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3537 }
3538
3539 /* Commit */
3540 pSReg->Sel = uSel;
3541 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3542 pSReg->u32Limit = cbLimit;
3543 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3544 pSReg->ValidSel = uSel;
3545 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3546 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3547 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3548
3549 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3550 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3551 return VINF_SUCCESS;
3552}
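/*
 * Illustrative sketch of the privilege rule enforced above for data segments
 * and non-conforming code segments during a task switch (hypothetical
 * helper): both the selector RPL and the new CPL must be less than or equal
 * to the descriptor DPL, otherwise #TS is raised.
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *      static bool taskSwitchDataSegPrivOk(uint8_t uRpl, uint8_t uCpl, uint8_t uDpl)
 *      {
 *          return uRpl <= uDpl && uCpl <= uDpl;
 *      }
 */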
3553
3554
3555/**
3556 * Performs a task switch.
3557 *
3558 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3559 * caller is responsible for performing the necessary checks (like DPL, TSS
3560 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3561 * reference for JMP, CALL, IRET.
3562 *
3563 * If the task switch is due to a software interrupt or hardware exception,
3564 * the caller is responsible for validating the TSS selector and descriptor. See
3565 * Intel Instruction reference for INT n.
3566 *
3567 * @returns VBox strict status code.
3568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3569 * @param pCtx The CPU context.
3570 * @param enmTaskSwitch What caused this task switch.
3571 * @param uNextEip The EIP effective after the task switch.
3572 * @param fFlags The flags.
3573 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3574 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3575 * @param SelTSS The TSS selector of the new task.
3576 * @param pNewDescTSS Pointer to the new TSS descriptor.
3577 */
3578IEM_STATIC VBOXSTRICTRC
3579iemTaskSwitch(PVMCPU pVCpu,
3580 PCPUMCTX pCtx,
3581 IEMTASKSWITCH enmTaskSwitch,
3582 uint32_t uNextEip,
3583 uint32_t fFlags,
3584 uint16_t uErr,
3585 uint64_t uCr2,
3586 RTSEL SelTSS,
3587 PIEMSELDESC pNewDescTSS)
3588{
3589 Assert(!IEM_IS_REAL_MODE(pVCpu));
3590 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3591
3592 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3593 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3594 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3595 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3596 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3597
3598 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3599 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3600
3601 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3602 fIsNewTSS386, pCtx->eip, uNextEip));
3603
3604 /* Update CR2 in case it's a page-fault. */
3605 /** @todo This should probably be done much earlier in IEM/PGM. See
3606 * @bugref{5653#c49}. */
3607 if (fFlags & IEM_XCPT_FLAGS_CR2)
3608 pCtx->cr2 = uCr2;
3609
3610 /*
3611 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3612 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3613 */
3614 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3615 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3616 if (uNewTSSLimit < uNewTSSLimitMin)
3617 {
3618 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3619 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3620 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3621 }
3622
3623 /*
3624 * Check the current TSS limit. The last write to the current TSS during the
3625 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3626 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3627 *
3628 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3629 * end up with smaller than "legal" TSS limits.
3630 */
3631 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3632 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3633 if (uCurTSSLimit < uCurTSSLimitMin)
3634 {
3635 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3636 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3637 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3638 }
3639
3640 /*
3641 * Verify that the new TSS can be accessed and map it. Map only the required contents
3642 * and not the entire TSS.
3643 */
3644 void *pvNewTSS;
3645 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3646 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3647 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3648 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3649 * not perform correct translation if this happens. See Intel spec. 7.2.1
3650 * "Task-State Segment" */
3651 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3652 if (rcStrict != VINF_SUCCESS)
3653 {
3654 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3655 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3656 return rcStrict;
3657 }
3658
3659 /*
3660 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3661 */
3662 uint32_t u32EFlags = pCtx->eflags.u32;
3663 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3664 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3665 {
3666 PX86DESC pDescCurTSS;
3667 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3668 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3669 if (rcStrict != VINF_SUCCESS)
3670 {
3671 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3672 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3673 return rcStrict;
3674 }
3675
3676 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3677 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3678 if (rcStrict != VINF_SUCCESS)
3679 {
3680 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3681 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3682 return rcStrict;
3683 }
3684
3685 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3686 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3687 {
3688 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3689 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3690 u32EFlags &= ~X86_EFL_NT;
3691 }
3692 }
3693
3694 /*
3695 * Save the CPU state into the current TSS.
3696 */
3697 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3698 if (GCPtrNewTSS == GCPtrCurTSS)
3699 {
3700 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3701 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3702 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3703 }
3704 if (fIsNewTSS386)
3705 {
3706 /*
3707 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3708 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3709 */
3710 void *pvCurTSS32;
3711 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3712 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3713 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3714 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3715 if (rcStrict != VINF_SUCCESS)
3716 {
3717 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3718 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3719 return rcStrict;
3720 }
3721
3722 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3723 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3724 pCurTSS32->eip = uNextEip;
3725 pCurTSS32->eflags = u32EFlags;
3726 pCurTSS32->eax = pCtx->eax;
3727 pCurTSS32->ecx = pCtx->ecx;
3728 pCurTSS32->edx = pCtx->edx;
3729 pCurTSS32->ebx = pCtx->ebx;
3730 pCurTSS32->esp = pCtx->esp;
3731 pCurTSS32->ebp = pCtx->ebp;
3732 pCurTSS32->esi = pCtx->esi;
3733 pCurTSS32->edi = pCtx->edi;
3734 pCurTSS32->es = pCtx->es.Sel;
3735 pCurTSS32->cs = pCtx->cs.Sel;
3736 pCurTSS32->ss = pCtx->ss.Sel;
3737 pCurTSS32->ds = pCtx->ds.Sel;
3738 pCurTSS32->fs = pCtx->fs.Sel;
3739 pCurTSS32->gs = pCtx->gs.Sel;
3740
3741 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3742 if (rcStrict != VINF_SUCCESS)
3743 {
3744 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3745 VBOXSTRICTRC_VAL(rcStrict)));
3746 return rcStrict;
3747 }
3748 }
3749 else
3750 {
3751 /*
3752 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3753 */
3754 void *pvCurTSS16;
3755 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3756 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3757 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3758 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3759 if (rcStrict != VINF_SUCCESS)
3760 {
3761 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3762 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3763 return rcStrict;
3764 }
3765
3766 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
3767 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3768 pCurTSS16->ip = uNextEip;
3769 pCurTSS16->flags = u32EFlags;
3770 pCurTSS16->ax = pCtx->ax;
3771 pCurTSS16->cx = pCtx->cx;
3772 pCurTSS16->dx = pCtx->dx;
3773 pCurTSS16->bx = pCtx->bx;
3774 pCurTSS16->sp = pCtx->sp;
3775 pCurTSS16->bp = pCtx->bp;
3776 pCurTSS16->si = pCtx->si;
3777 pCurTSS16->di = pCtx->di;
3778 pCurTSS16->es = pCtx->es.Sel;
3779 pCurTSS16->cs = pCtx->cs.Sel;
3780 pCurTSS16->ss = pCtx->ss.Sel;
3781 pCurTSS16->ds = pCtx->ds.Sel;
3782
3783 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3784 if (rcStrict != VINF_SUCCESS)
3785 {
3786 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3787 VBOXSTRICTRC_VAL(rcStrict)));
3788 return rcStrict;
3789 }
3790 }
3791
3792 /*
3793 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3794 */
3795 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3796 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3797 {
3798 /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
3799 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3800 pNewTSS->selPrev = pCtx->tr.Sel;
3801 }
3802
3803 /*
3804 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3805 * so it's done further below with error handling (e.g. CR3 changes will go through PGM).
3806 */
3807 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3808 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3809 bool fNewDebugTrap;
3810 if (fIsNewTSS386)
3811 {
3812 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3813 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3814 uNewEip = pNewTSS32->eip;
3815 uNewEflags = pNewTSS32->eflags;
3816 uNewEax = pNewTSS32->eax;
3817 uNewEcx = pNewTSS32->ecx;
3818 uNewEdx = pNewTSS32->edx;
3819 uNewEbx = pNewTSS32->ebx;
3820 uNewEsp = pNewTSS32->esp;
3821 uNewEbp = pNewTSS32->ebp;
3822 uNewEsi = pNewTSS32->esi;
3823 uNewEdi = pNewTSS32->edi;
3824 uNewES = pNewTSS32->es;
3825 uNewCS = pNewTSS32->cs;
3826 uNewSS = pNewTSS32->ss;
3827 uNewDS = pNewTSS32->ds;
3828 uNewFS = pNewTSS32->fs;
3829 uNewGS = pNewTSS32->gs;
3830 uNewLdt = pNewTSS32->selLdt;
3831 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3832 }
3833 else
3834 {
3835 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3836 uNewCr3 = 0;
3837 uNewEip = pNewTSS16->ip;
3838 uNewEflags = pNewTSS16->flags;
3839 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3840 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3841 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3842 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3843 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3844 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3845 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3846 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3847 uNewES = pNewTSS16->es;
3848 uNewCS = pNewTSS16->cs;
3849 uNewSS = pNewTSS16->ss;
3850 uNewDS = pNewTSS16->ds;
3851 uNewFS = 0;
3852 uNewGS = 0;
3853 uNewLdt = pNewTSS16->selLdt;
3854 fNewDebugTrap = false;
3855 }
3856
3857 if (GCPtrNewTSS == GCPtrCurTSS)
3858 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3859 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3860
3861 /*
3862 * We're done accessing the new TSS.
3863 */
3864 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3865 if (rcStrict != VINF_SUCCESS)
3866 {
3867 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3868 return rcStrict;
3869 }
3870
3871 /*
3872 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3873 */
3874 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3875 {
3876 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3877 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3878 if (rcStrict != VINF_SUCCESS)
3879 {
3880 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3881 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3882 return rcStrict;
3883 }
3884
3885 /* Check that the descriptor indicates the new TSS is available (not busy). */
3886 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3887 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3888 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3889
3890 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3891 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3892 if (rcStrict != VINF_SUCCESS)
3893 {
3894 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3895 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3896 return rcStrict;
3897 }
3898 }
3899
3900 /*
3901 * From this point on, we're technically in the new task. Exceptions raised from here on are
3902 * deferred until the task switch completes and are delivered before executing any instruction in the new task.
3903 */
3904 pCtx->tr.Sel = SelTSS;
3905 pCtx->tr.ValidSel = SelTSS;
3906 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3907 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3908 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3909 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3910 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3911
3912 /* Set the busy bit in TR. */
3913 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3914 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3915 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3916 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3917 {
3918 uNewEflags |= X86_EFL_NT;
3919 }
3920
3921 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3922 pCtx->cr0 |= X86_CR0_TS;
3923 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3924
3925 pCtx->eip = uNewEip;
3926 pCtx->eax = uNewEax;
3927 pCtx->ecx = uNewEcx;
3928 pCtx->edx = uNewEdx;
3929 pCtx->ebx = uNewEbx;
3930 pCtx->esp = uNewEsp;
3931 pCtx->ebp = uNewEbp;
3932 pCtx->esi = uNewEsi;
3933 pCtx->edi = uNewEdi;
3934
3935 uNewEflags &= X86_EFL_LIVE_MASK;
3936 uNewEflags |= X86_EFL_RA1_MASK;
3937 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3938
3939 /*
3940 * Switch the selectors here and do the segment checks later so that, if we throw exceptions, the selectors
3941 * are already valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
3942 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
3943 */
3944 pCtx->es.Sel = uNewES;
3945 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3946
3947 pCtx->cs.Sel = uNewCS;
3948 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3949
3950 pCtx->ss.Sel = uNewSS;
3951 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3952
3953 pCtx->ds.Sel = uNewDS;
3954 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3955
3956 pCtx->fs.Sel = uNewFS;
3957 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3958
3959 pCtx->gs.Sel = uNewGS;
3960 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3961 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3962
3963 pCtx->ldtr.Sel = uNewLdt;
3964 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3965 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3966 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3967
3968 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3969 {
3970 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3971 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3972 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3973 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3974 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3975 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3976 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3977 }
3978
3979 /*
3980 * Switch CR3 for the new task.
3981 */
3982 if ( fIsNewTSS386
3983 && (pCtx->cr0 & X86_CR0_PG))
3984 {
3985 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3986 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3987 {
3988 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3989 AssertRCSuccessReturn(rc, rc);
3990 }
3991 else
3992 pCtx->cr3 = uNewCr3;
3993
3994 /* Inform PGM. */
3995 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3996 {
3997 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3998 AssertRCReturn(rc, rc);
3999 /* ignore informational status codes */
4000 }
4001 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4002 }
4003
4004 /*
4005 * Switch LDTR for the new task.
4006 */
4007 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4008 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4009 else
4010 {
4011 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4012
4013 IEMSELDESC DescNewLdt;
4014 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4015 if (rcStrict != VINF_SUCCESS)
4016 {
4017 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4018 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4019 return rcStrict;
4020 }
4021 if ( !DescNewLdt.Legacy.Gen.u1Present
4022 || DescNewLdt.Legacy.Gen.u1DescType
4023 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4024 {
4025 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4026 uNewLdt, DescNewLdt.Legacy.u));
4027 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4028 }
4029
4030 pCtx->ldtr.ValidSel = uNewLdt;
4031 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4032 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4033 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4034 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4035 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4036 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4038 }
4039
4040 IEMSELDESC DescSS;
4041 if (IEM_IS_V86_MODE(pVCpu))
4042 {
4043 pVCpu->iem.s.uCpl = 3;
4044 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->es, uNewES);
4045 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->cs, uNewCS);
4046 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ss, uNewSS);
4047 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ds, uNewDS);
4048 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->fs, uNewFS);
4049 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->gs, uNewGS);
4050 }
4051 else
4052 {
4053 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4054
4055 /*
4056 * Load the stack segment for the new task.
4057 */
4058 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4059 {
4060 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4061 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4062 }
4063
4064 /* Fetch the descriptor. */
4065 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4066 if (rcStrict != VINF_SUCCESS)
4067 {
4068 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4069 VBOXSTRICTRC_VAL(rcStrict)));
4070 return rcStrict;
4071 }
4072
4073 /* SS must be a data segment and writable. */
4074 if ( !DescSS.Legacy.Gen.u1DescType
4075 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4076 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4077 {
4078 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4079 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4080 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4081 }
4082
4083 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4084 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4085 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4086 {
4087 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4088 uNewCpl));
4089 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4090 }
4091
4092 /* Is it there? */
4093 if (!DescSS.Legacy.Gen.u1Present)
4094 {
4095 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4096 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4097 }
4098
4099 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4100 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4101
4102 /* Set the accessed bit before committing the result into SS. */
4103 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4104 {
4105 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4106 if (rcStrict != VINF_SUCCESS)
4107 return rcStrict;
4108 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4109 }
4110
4111 /* Commit SS. */
4112 pCtx->ss.Sel = uNewSS;
4113 pCtx->ss.ValidSel = uNewSS;
4114 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4115 pCtx->ss.u32Limit = cbLimit;
4116 pCtx->ss.u64Base = u64Base;
4117 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4119
4120 /* CPL has changed, update IEM before loading rest of segments. */
4121 pVCpu->iem.s.uCpl = uNewCpl;
4122
4123 /*
4124 * Load the data segments for the new task.
4125 */
4126 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4127 if (rcStrict != VINF_SUCCESS)
4128 return rcStrict;
4129 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4130 if (rcStrict != VINF_SUCCESS)
4131 return rcStrict;
4132 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4133 if (rcStrict != VINF_SUCCESS)
4134 return rcStrict;
4135 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4136 if (rcStrict != VINF_SUCCESS)
4137 return rcStrict;
4138
4139 /*
4140 * Load the code segment for the new task.
4141 */
4142 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4143 {
4144 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4145 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4146 }
4147
4148 /* Fetch the descriptor. */
4149 IEMSELDESC DescCS;
4150 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4151 if (rcStrict != VINF_SUCCESS)
4152 {
4153 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4154 return rcStrict;
4155 }
4156
4157 /* CS must be a code segment. */
4158 if ( !DescCS.Legacy.Gen.u1DescType
4159 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4160 {
4161 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4162 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4163 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4164 }
4165
4166 /* For conforming CS, DPL must be less than or equal to the RPL. */
4167 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4168 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4169 {
4170 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4171 DescCS.Legacy.Gen.u2Dpl));
4172 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4173 }
4174
4175 /* For non-conforming CS, DPL must match RPL. */
4176 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4177 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4178 {
4179 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4180 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4181 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4182 }
4183
4184 /* Is it there? */
4185 if (!DescCS.Legacy.Gen.u1Present)
4186 {
4187 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4188 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4189 }
4190
4191 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4192 u64Base = X86DESC_BASE(&DescCS.Legacy);
4193
4194 /* Set the accessed bit before committing the result into CS. */
4195 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4196 {
4197 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4198 if (rcStrict != VINF_SUCCESS)
4199 return rcStrict;
4200 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4201 }
4202
4203 /* Commit CS. */
4204 pCtx->cs.Sel = uNewCS;
4205 pCtx->cs.ValidSel = uNewCS;
4206 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4207 pCtx->cs.u32Limit = cbLimit;
4208 pCtx->cs.u64Base = u64Base;
4209 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4211 }
4212
4213 /** @todo Debug trap. */
4214 if (fIsNewTSS386 && fNewDebugTrap)
4215 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4216
4217 /*
4218 * Construct the error code masks based on what caused this task switch.
4219 * See Intel Instruction reference for INT.
4220 */
4221 uint16_t uExt;
4222 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4223 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4224 {
4225 uExt = 1;
4226 }
4227 else
4228 uExt = 0;
4229
4230 /*
4231 * Push any error code on to the new stack.
4232 */
4233 if (fFlags & IEM_XCPT_FLAGS_ERR)
4234 {
4235 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4236 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4237 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4238
4239 /* Check that there is sufficient space on the stack. */
4240 /** @todo Factor out segment limit checking for normal/expand down segments
4241 * into a separate function. */
4242 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4243 {
4244 if ( pCtx->esp - 1 > cbLimitSS
4245 || pCtx->esp < cbStackFrame)
4246 {
4247 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4248 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4249 cbStackFrame));
4250 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4251 }
4252 }
4253 else
4254 {
4255 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
4256 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4257 {
4258 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4259 cbStackFrame));
4260 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4261 }
4262 }
4263
4264
4265 if (fIsNewTSS386)
4266 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4267 else
4268 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4269 if (rcStrict != VINF_SUCCESS)
4270 {
4271 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
4272 VBOXSTRICTRC_VAL(rcStrict)));
4273 return rcStrict;
4274 }
4275 }
4276
4277 /* Check the new EIP against the new CS limit. */
4278 if (pCtx->eip > pCtx->cs.u32Limit)
4279 {
4280 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4281 pCtx->eip, pCtx->cs.u32Limit));
4282 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4283 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4284 }
4285
4286 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4287 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4288}
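/*
 * Illustrative sketch of the EXT bit computation used above when forming
 * error codes (hypothetical helper): the bit is set only when the task
 * switch is triggered by an event external to the program, i.e. a hardware
 * interrupt or exception, and not by a software INT n.
 *
 *      #include <stdint.h>
 *      static uint16_t taskSwitchExtBit(int fIsIntOrXcpt, int fIsSoftInt)
 *      {
 *          return (fIsIntOrXcpt && !fIsSoftInt) ? 1u : 0u;   // cf. IEM_XCPT_FLAGS_T_SOFT_INT
 *      }
 */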
4289
4290
4291/**
4292 * Implements exceptions and interrupts for protected mode.
4293 *
4294 * @returns VBox strict status code.
4295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4296 * @param pCtx The CPU context.
4297 * @param cbInstr The number of bytes to offset rIP by in the return
4298 * address.
4299 * @param u8Vector The interrupt / exception vector number.
4300 * @param fFlags The flags.
4301 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4302 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4303 */
4304IEM_STATIC VBOXSTRICTRC
4305iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4306 PCPUMCTX pCtx,
4307 uint8_t cbInstr,
4308 uint8_t u8Vector,
4309 uint32_t fFlags,
4310 uint16_t uErr,
4311 uint64_t uCr2)
4312{
4313 /*
4314 * Read the IDT entry.
4315 */
4316 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4317 {
4318 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4319 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4320 }
4321 X86DESC Idte;
4322 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4323 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4324 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4325 return rcStrict;
4326 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4327 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4328 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4329
4330 /*
4331 * Check the descriptor type, DPL and such.
4332 * ASSUMES this is done in the same order as described for call-gate calls.
4333 */
4334 if (Idte.Gate.u1DescType)
4335 {
4336 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4337 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4338 }
4339 bool fTaskGate = false;
4340 uint8_t f32BitGate = true;
4341 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4342 switch (Idte.Gate.u4Type)
4343 {
4344 case X86_SEL_TYPE_SYS_UNDEFINED:
4345 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4346 case X86_SEL_TYPE_SYS_LDT:
4347 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4348 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4349 case X86_SEL_TYPE_SYS_UNDEFINED2:
4350 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4351 case X86_SEL_TYPE_SYS_UNDEFINED3:
4352 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4353 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4354 case X86_SEL_TYPE_SYS_UNDEFINED4:
4355 {
4356 /** @todo check what actually happens when the type is wrong...
4357 * esp. call gates. */
4358 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4359 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4360 }
4361
4362 case X86_SEL_TYPE_SYS_286_INT_GATE:
4363 f32BitGate = false; /* fall thru */
4364 case X86_SEL_TYPE_SYS_386_INT_GATE:
4365 fEflToClear |= X86_EFL_IF;
4366 break;
4367
4368 case X86_SEL_TYPE_SYS_TASK_GATE:
4369 fTaskGate = true;
4370#ifndef IEM_IMPLEMENTS_TASKSWITCH
4371 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4372#endif
4373 break;
4374
4375 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4376 f32BitGate = false; /* fall thru */
4377 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4378 break;
4379
4380 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4381 }
4382
4383 /* Check DPL against CPL if applicable. */
4384 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4385 {
4386 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4387 {
4388 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4389 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4390 }
4391 }
4392
4393 /* Is it there? */
4394 if (!Idte.Gate.u1Present)
4395 {
4396 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4397 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4398 }
4399
4400 /* Is it a task-gate? */
4401 if (fTaskGate)
4402 {
4403 /*
4404 * Construct the error code masks based on what caused this task switch.
4405 * See Intel Instruction reference for INT.
4406 */
4407 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4408 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4409 RTSEL SelTSS = Idte.Gate.u16Sel;
4410
4411 /*
4412 * Fetch the TSS descriptor in the GDT.
4413 */
4414 IEMSELDESC DescTSS;
4415 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4416 if (rcStrict != VINF_SUCCESS)
4417 {
4418 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4419 VBOXSTRICTRC_VAL(rcStrict)));
4420 return rcStrict;
4421 }
4422
4423 /* The TSS descriptor must be a system segment and be available (not busy). */
4424 if ( DescTSS.Legacy.Gen.u1DescType
4425 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4426 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4427 {
4428 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4429 u8Vector, SelTSS, DescTSS.Legacy.au64));
4430 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4431 }
4432
4433 /* The TSS must be present. */
4434 if (!DescTSS.Legacy.Gen.u1Present)
4435 {
4436 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4437 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4438 }
4439
4440 /* Do the actual task switch. */
4441 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4442 }
4443
4444 /* A null CS is bad. */
4445 RTSEL NewCS = Idte.Gate.u16Sel;
4446 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4447 {
4448 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4449 return iemRaiseGeneralProtectionFault0(pVCpu);
4450 }
4451
4452 /* Fetch the descriptor for the new CS. */
4453 IEMSELDESC DescCS;
4454 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4455 if (rcStrict != VINF_SUCCESS)
4456 {
4457 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4458 return rcStrict;
4459 }
4460
4461 /* Must be a code segment. */
4462 if (!DescCS.Legacy.Gen.u1DescType)
4463 {
4464 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4465 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4466 }
4467 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4468 {
4469 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4470 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4471 }
4472
4473 /* Don't allow lowering the privilege level. */
4474 /** @todo Does the lowering of privileges apply to software interrupts
4475 * only? This has bearings on the more-privileged or
4476 * same-privilege stack behavior further down. A testcase would
4477 * be nice. */
4478 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4479 {
4480 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4481 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4482 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4483 }
4484
4485 /* Make sure the selector is present. */
4486 if (!DescCS.Legacy.Gen.u1Present)
4487 {
4488 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4489 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4490 }
4491
4492 /* Check the new EIP against the new CS limit. */
4493 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4494 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4495 ? Idte.Gate.u16OffsetLow
4496 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4497 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4498 if (uNewEip > cbLimitCS)
4499 {
4500 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4501 u8Vector, uNewEip, cbLimitCS, NewCS));
4502 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4503 }
4504
4505 /* Calc the flag image to push. */
4506 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4507 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4508 fEfl &= ~X86_EFL_RF;
4509 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4510 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4511
4512 /* From V8086 mode only go to CPL 0. */
4513 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4514 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4515 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4516 {
4517 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4518 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4519 }
4520
4521 /*
4522 * If the privilege level changes, we need to get a new stack from the TSS.
4523 * This in turns means validating the new SS and ESP...
4524 */
4525 if (uNewCpl != pVCpu->iem.s.uCpl)
4526 {
4527 RTSEL NewSS;
4528 uint32_t uNewEsp;
4529 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4530 if (rcStrict != VINF_SUCCESS)
4531 return rcStrict;
4532
4533 IEMSELDESC DescSS;
4534 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4535 if (rcStrict != VINF_SUCCESS)
4536 return rcStrict;
4537
4538 /* Check that there is sufficient space for the stack frame. */
4539 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4540 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4541 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4542 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4543
4544 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4545 {
4546 if ( uNewEsp - 1 > cbLimitSS
4547 || uNewEsp < cbStackFrame)
4548 {
4549 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4550 u8Vector, NewSS, uNewEsp, cbStackFrame));
4551 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4552 }
4553 }
4554 else
4555 {
4556 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
4557 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4558 {
4559 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4560 u8Vector, NewSS, uNewEsp, cbStackFrame));
4561 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4562 }
4563 }
4564
4565 /*
4566 * Start making changes.
4567 */
4568
4569 /* Set the new CPL so that stack accesses use it. */
4570 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4571 pVCpu->iem.s.uCpl = uNewCpl;
4572
4573 /* Create the stack frame. */
4574 RTPTRUNION uStackFrame;
4575 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4576 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4577 if (rcStrict != VINF_SUCCESS)
4578 return rcStrict;
4579 void * const pvStackFrame = uStackFrame.pv;
4580 if (f32BitGate)
4581 {
4582 if (fFlags & IEM_XCPT_FLAGS_ERR)
4583 *uStackFrame.pu32++ = uErr;
4584 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4585 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4586 uStackFrame.pu32[2] = fEfl;
4587 uStackFrame.pu32[3] = pCtx->esp;
4588 uStackFrame.pu32[4] = pCtx->ss.Sel;
4589 if (fEfl & X86_EFL_VM)
4590 {
4591 uStackFrame.pu32[1] = pCtx->cs.Sel;
4592 uStackFrame.pu32[5] = pCtx->es.Sel;
4593 uStackFrame.pu32[6] = pCtx->ds.Sel;
4594 uStackFrame.pu32[7] = pCtx->fs.Sel;
4595 uStackFrame.pu32[8] = pCtx->gs.Sel;
4596 }
4597 }
4598 else
4599 {
4600 if (fFlags & IEM_XCPT_FLAGS_ERR)
4601 *uStackFrame.pu16++ = uErr;
4602 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4603 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4604 uStackFrame.pu16[2] = fEfl;
4605 uStackFrame.pu16[3] = pCtx->sp;
4606 uStackFrame.pu16[4] = pCtx->ss.Sel;
4607 if (fEfl & X86_EFL_VM)
4608 {
4609 uStackFrame.pu16[1] = pCtx->cs.Sel;
4610 uStackFrame.pu16[5] = pCtx->es.Sel;
4611 uStackFrame.pu16[6] = pCtx->ds.Sel;
4612 uStackFrame.pu16[7] = pCtx->fs.Sel;
4613 uStackFrame.pu16[8] = pCtx->gs.Sel;
4614 }
4615 }
4616 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4617 if (rcStrict != VINF_SUCCESS)
4618 return rcStrict;
4619
4620 /* Mark the selectors 'accessed' (hope this is the correct time). */
4621 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4622 * after pushing the stack frame? (Write protect the gdt + stack to
4623 * find out.) */
4624 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4625 {
4626 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4627 if (rcStrict != VINF_SUCCESS)
4628 return rcStrict;
4629 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4630 }
4631
4632 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4633 {
4634 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4635 if (rcStrict != VINF_SUCCESS)
4636 return rcStrict;
4637 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4638 }
4639
4640 /*
4641 * Start committing the register changes (joins with the DPL=CPL branch).
4642 */
4643 pCtx->ss.Sel = NewSS;
4644 pCtx->ss.ValidSel = NewSS;
4645 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4646 pCtx->ss.u32Limit = cbLimitSS;
4647 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4648 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4649 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4650 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4651 * SP is loaded).
4652 * Need to check the other combinations too:
4653 * - 16-bit TSS, 32-bit handler
4654 * - 32-bit TSS, 16-bit handler */
4655 if (!pCtx->ss.Attr.n.u1DefBig)
4656 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4657 else
4658 pCtx->rsp = uNewEsp - cbStackFrame;
4659
4660 if (fEfl & X86_EFL_VM)
4661 {
4662 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4663 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4664 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4665 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4666 }
4667 }
4668 /*
4669 * Same privilege, no stack change and smaller stack frame.
4670 */
4671 else
4672 {
4673 uint64_t uNewRsp;
4674 RTPTRUNION uStackFrame;
4675 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4676 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4677 if (rcStrict != VINF_SUCCESS)
4678 return rcStrict;
4679 void * const pvStackFrame = uStackFrame.pv;
4680
4681 if (f32BitGate)
4682 {
4683 if (fFlags & IEM_XCPT_FLAGS_ERR)
4684 *uStackFrame.pu32++ = uErr;
4685 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4686 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4687 uStackFrame.pu32[2] = fEfl;
4688 }
4689 else
4690 {
4691 if (fFlags & IEM_XCPT_FLAGS_ERR)
4692 *uStackFrame.pu16++ = uErr;
4693 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4694 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4695 uStackFrame.pu16[2] = fEfl;
4696 }
4697 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4698 if (rcStrict != VINF_SUCCESS)
4699 return rcStrict;
4700
4701 /* Mark the CS selector as 'accessed'. */
4702 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4703 {
4704 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4705 if (rcStrict != VINF_SUCCESS)
4706 return rcStrict;
4707 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4708 }
4709
4710 /*
4711 * Start committing the register changes (joins with the other branch).
4712 */
4713 pCtx->rsp = uNewRsp;
4714 }
4715
4716 /* ... register committing continues. */
4717 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4718 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4719 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4720 pCtx->cs.u32Limit = cbLimitCS;
4721 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4722 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4723
4724 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4725 fEfl &= ~fEflToClear;
4726 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4727
4728 if (fFlags & IEM_XCPT_FLAGS_CR2)
4729 pCtx->cr2 = uCr2;
4730
4731 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4732 iemRaiseXcptAdjustState(pCtx, u8Vector);
4733
4734 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4735}
4736
4737
4738/**
4739 * Implements exceptions and interrupts for long mode.
4740 *
4741 * @returns VBox strict status code.
4742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4743 * @param pCtx The CPU context.
4744 * @param cbInstr The number of bytes to offset rIP by in the return
4745 * address.
4746 * @param u8Vector The interrupt / exception vector number.
4747 * @param fFlags The flags.
4748 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4749 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4750 */
4751IEM_STATIC VBOXSTRICTRC
4752iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4753 PCPUMCTX pCtx,
4754 uint8_t cbInstr,
4755 uint8_t u8Vector,
4756 uint32_t fFlags,
4757 uint16_t uErr,
4758 uint64_t uCr2)
4759{
4760 /*
4761 * Read the IDT entry.
4762 */
4763 uint16_t offIdt = (uint16_t)u8Vector << 4;
4764 if (pCtx->idtr.cbIdt < offIdt + 7)
4765 {
4766 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4767 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4768 }
4769 X86DESC64 Idte;
4770 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4771 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4772 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4773 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4774 return rcStrict;
4775 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4776 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4777 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4778
4779 /*
4780 * Check the descriptor type, DPL and such.
4781 * ASSUMES this is done in the same order as described for call-gate calls.
4782 */
4783 if (Idte.Gate.u1DescType)
4784 {
4785 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4786 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4787 }
4788 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4789 switch (Idte.Gate.u4Type)
4790 {
4791 case AMD64_SEL_TYPE_SYS_INT_GATE:
4792 fEflToClear |= X86_EFL_IF;
4793 break;
4794 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4795 break;
4796
4797 default:
4798 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4799 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4800 }
4801
4802 /* Check DPL against CPL if applicable. */
4803 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4804 {
4805 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4806 {
4807 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4808 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4809 }
4810 }
4811
4812 /* Is it there? */
4813 if (!Idte.Gate.u1Present)
4814 {
4815 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4816 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4817 }
4818
4819 /* A null CS is bad. */
4820 RTSEL NewCS = Idte.Gate.u16Sel;
4821 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4822 {
4823 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4824 return iemRaiseGeneralProtectionFault0(pVCpu);
4825 }
4826
4827 /* Fetch the descriptor for the new CS. */
4828 IEMSELDESC DescCS;
4829 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4830 if (rcStrict != VINF_SUCCESS)
4831 {
4832 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4833 return rcStrict;
4834 }
4835
4836 /* Must be a 64-bit code segment. */
4837 if (!DescCS.Long.Gen.u1DescType)
4838 {
4839 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4840 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4841 }
4842 if ( !DescCS.Long.Gen.u1Long
4843 || DescCS.Long.Gen.u1DefBig
4844 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4845 {
4846 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4847 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4848 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4849 }
4850
4851 /* Don't allow lowering the privilege level. For non-conforming CS
4852 selectors, the CS.DPL sets the privilege level the trap/interrupt
4853 handler runs at. For conforming CS selectors, the CPL remains
4854 unchanged, but the CS.DPL must be <= CPL. */
4855 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4856 * when CPU in Ring-0. Result \#GP? */
4857 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4858 {
4859 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4860 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4861 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4862 }
4863
4864
4865 /* Make sure the selector is present. */
4866 if (!DescCS.Legacy.Gen.u1Present)
4867 {
4868 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4869 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4870 }
4871
4872 /* Check that the new RIP is canonical. */
4873 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4874 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4875 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4876 if (!IEM_IS_CANONICAL(uNewRip))
4877 {
4878 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4879 return iemRaiseGeneralProtectionFault0(pVCpu);
4880 }
4881
4882 /*
4883 * If the privilege level changes or if the IST isn't zero, we need to get
4884 * a new stack from the TSS.
4885 */
4886 uint64_t uNewRsp;
4887 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4888 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4889 if ( uNewCpl != pVCpu->iem.s.uCpl
4890 || Idte.Gate.u3IST != 0)
4891 {
4892 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4893 if (rcStrict != VINF_SUCCESS)
4894 return rcStrict;
4895 }
4896 else
4897 uNewRsp = pCtx->rsp;
4898 uNewRsp &= ~(uint64_t)0xf;
4899
4900 /*
4901 * Calc the flag image to push.
4902 */
4903 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4904 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4905 fEfl &= ~X86_EFL_RF;
4906 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4907 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4908
4909 /*
4910 * Start making changes.
4911 */
4912 /* Set the new CPL so that stack accesses use it. */
4913 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4914 pVCpu->iem.s.uCpl = uNewCpl;
4915
4916 /* Create the stack frame. */
4917 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4918 RTPTRUNION uStackFrame;
4919 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4920 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4921 if (rcStrict != VINF_SUCCESS)
4922 return rcStrict;
4923 void * const pvStackFrame = uStackFrame.pv;
4924
4925 if (fFlags & IEM_XCPT_FLAGS_ERR)
4926 *uStackFrame.pu64++ = uErr;
4927 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4928 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4929 uStackFrame.pu64[2] = fEfl;
4930 uStackFrame.pu64[3] = pCtx->rsp;
4931 uStackFrame.pu64[4] = pCtx->ss.Sel;
4932 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4933 if (rcStrict != VINF_SUCCESS)
4934 return rcStrict;
4935
4936    /* Mark the CS selector 'accessed' (hope this is the correct time). */
4937    /** @todo testcase: exactly _when_ are the accessed bits set - before or
4938 * after pushing the stack frame? (Write protect the gdt + stack to
4939 * find out.) */
4940 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4941 {
4942 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4943 if (rcStrict != VINF_SUCCESS)
4944 return rcStrict;
4945 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4946 }
4947
4948 /*
4949     * Start committing the register changes.
4950 */
4951 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4952 * hidden registers when interrupting 32-bit or 16-bit code! */
4953 if (uNewCpl != uOldCpl)
4954 {
4955 pCtx->ss.Sel = 0 | uNewCpl;
4956 pCtx->ss.ValidSel = 0 | uNewCpl;
4957 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4958 pCtx->ss.u32Limit = UINT32_MAX;
4959 pCtx->ss.u64Base = 0;
4960 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4961 }
4962 pCtx->rsp = uNewRsp - cbStackFrame;
4963 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4964 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4965 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4966 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4967 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4968 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4969 pCtx->rip = uNewRip;
4970
4971 fEfl &= ~fEflToClear;
4972 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4973
4974 if (fFlags & IEM_XCPT_FLAGS_CR2)
4975 pCtx->cr2 = uCr2;
4976
4977 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4978 iemRaiseXcptAdjustState(pCtx, u8Vector);
4979
4980 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4981}
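
/*
 * Editor's sketch (not part of the original source): the 64-bit exception
 * stack frame that iemRaiseXcptOrIntInLongMode lays out at the 16-byte
 * aligned uNewRsp minus cbStackFrame, shown in increasing address order.
 * The struct name is hypothetical and exists only for illustration.
 */
#if 0 /* documentation only */
typedef struct IEMXCPTFRAME64
{
    uint64_t uErrCd;    /* Optional; present only when IEM_XCPT_FLAGS_ERR is set (lowest address). */
    uint64_t uRip;      /* Return RIP; advanced by cbInstr for software interrupts. */
    uint64_t uCs;       /* Old CS selector with the previous CPL in the RPL bits. */
    uint64_t uRFlags;   /* RFLAGS image after the RF adjustments above. */
    uint64_t uRsp;      /* Old RSP. */
    uint64_t uSs;       /* Old SS selector (highest address). */
} IEMXCPTFRAME64;
#endif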
4982
4983
4984/**
4985 * Implements exceptions and interrupts.
4986 *
4987 * All exceptions and interrupts go through this function!
4988 *
4989 * @returns VBox strict status code.
4990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4991 * @param cbInstr The number of bytes to offset rIP by in the return
4992 * address.
4993 * @param u8Vector The interrupt / exception vector number.
4994 * @param fFlags The flags.
4995 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4996 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4997 */
4998DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
4999iemRaiseXcptOrInt(PVMCPU pVCpu,
5000 uint8_t cbInstr,
5001 uint8_t u8Vector,
5002 uint32_t fFlags,
5003 uint16_t uErr,
5004 uint64_t uCr2)
5005{
5006 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5007#ifdef IN_RING0
5008 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5009 AssertRCReturn(rc, rc);
5010#endif
5011
5012#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5013 /*
5014 * Flush prefetch buffer
5015 */
5016 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5017#endif
5018
5019 /*
5020 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5021 */
5022 if ( pCtx->eflags.Bits.u1VM
5023 && pCtx->eflags.Bits.u2IOPL != 3
5024 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5025 && (pCtx->cr0 & X86_CR0_PE) )
5026 {
5027 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5028 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5029 u8Vector = X86_XCPT_GP;
5030 uErr = 0;
5031 }
5032#ifdef DBGFTRACE_ENABLED
5033 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5034 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5035 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5036#endif
5037
5038 /*
5039 * Do recursion accounting.
5040 */
5041 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5042 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5043 if (pVCpu->iem.s.cXcptRecursions == 0)
5044 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5045 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5046 else
5047 {
5048 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5049 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5050
5051        /** @todo double and triple faults. */
5052 if (pVCpu->iem.s.cXcptRecursions >= 3)
5053 {
5054#ifdef DEBUG_bird
5055 AssertFailed();
5056#endif
5057 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5058 }
5059
5060 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5061 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5062 {
5063 ....
5064 } */
5065 }
5066 pVCpu->iem.s.cXcptRecursions++;
5067 pVCpu->iem.s.uCurXcpt = u8Vector;
5068 pVCpu->iem.s.fCurXcpt = fFlags;
5069
5070 /*
5071 * Extensive logging.
5072 */
5073#if defined(LOG_ENABLED) && defined(IN_RING3)
5074 if (LogIs3Enabled())
5075 {
5076 PVM pVM = pVCpu->CTX_SUFF(pVM);
5077 char szRegs[4096];
5078 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5079 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5080 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5081 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5082 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5083 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5084 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5085 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5086 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5087 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5088 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5089 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5090 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5091 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5092 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5093 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5094 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5095 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5096 " efer=%016VR{efer}\n"
5097 " pat=%016VR{pat}\n"
5098 " sf_mask=%016VR{sf_mask}\n"
5099 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5100 " lstar=%016VR{lstar}\n"
5101 " star=%016VR{star} cstar=%016VR{cstar}\n"
5102 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5103 );
5104
5105 char szInstr[256];
5106 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5107 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5108 szInstr, sizeof(szInstr), NULL);
5109 Log3(("%s%s\n", szRegs, szInstr));
5110 }
5111#endif /* LOG_ENABLED */
5112
5113 /*
5114 * Call the mode specific worker function.
5115 */
5116 VBOXSTRICTRC rcStrict;
5117 if (!(pCtx->cr0 & X86_CR0_PE))
5118 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5119 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5120 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5121 else
5122 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5123
5124 /* Flush the prefetch buffer. */
5125#ifdef IEM_WITH_CODE_TLB
5126 pVCpu->iem.s.pbInstrBuf = NULL;
5127#else
5128 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5129#endif
5130
5131 /*
5132 * Unwind.
5133 */
5134 pVCpu->iem.s.cXcptRecursions--;
5135 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5136 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5137 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5138 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5139 return rcStrict;
5140}
5141
5142#ifdef IEM_WITH_SETJMP
5143/**
5144 * See iemRaiseXcptOrInt. Will not return.
5145 */
5146IEM_STATIC DECL_NO_RETURN(void)
5147iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5148 uint8_t cbInstr,
5149 uint8_t u8Vector,
5150 uint32_t fFlags,
5151 uint16_t uErr,
5152 uint64_t uCr2)
5153{
5154 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5155 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5156}
5157#endif
5158
5159
5160/** \#DE - 00. */
5161DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5162{
5163 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5164}
5165
5166
5167/** \#DB - 01.
5168 * @note This automatically clears DR7.GD. */
5169DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5170{
5171 /** @todo set/clear RF. */
5172 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5173 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5174}
5175
5176
5177/** \#UD - 06. */
5178DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5179{
5180 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5181}
5182
5183
5184/** \#NM - 07. */
5185DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5186{
5187 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5188}
5189
5190
5191/** \#TS(err) - 0a. */
5192DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5193{
5194 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5195}
5196
5197
5198/** \#TS(tr) - 0a. */
5199DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5200{
5201 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5202 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5203}
5204
5205
5206/** \#TS(0) - 0a. */
5207DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5208{
5209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5210 0, 0);
5211}
5212
5213
5214/** \#TS(err) - 0a. */
5215DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5216{
5217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5218 uSel & X86_SEL_MASK_OFF_RPL, 0);
5219}
5220
5221
5222/** \#NP(err) - 0b. */
5223DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5224{
5225 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5226}
5227
5228
5229/** \#NP(seg) - 0b. */
5230DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5231{
5232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5233 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5234}
5235
5236
5237/** \#NP(sel) - 0b. */
5238DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5239{
5240 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5241 uSel & ~X86_SEL_RPL, 0);
5242}
5243
5244
5245/** \#SS(seg) - 0c. */
5246DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5247{
5248 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5249 uSel & ~X86_SEL_RPL, 0);
5250}
5251
5252
5253/** \#SS(err) - 0c. */
5254DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5255{
5256 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5257}
5258
5259
5260/** \#GP(n) - 0d. */
5261DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5262{
5263 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5264}
5265
5266
5267/** \#GP(0) - 0d. */
5268DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5269{
5270 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5271}
5272
5273#ifdef IEM_WITH_SETJMP
5274/** \#GP(0) - 0d. */
5275DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5276{
5277 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5278}
5279#endif
5280
5281
5282/** \#GP(sel) - 0d. */
5283DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5284{
5285 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5286 Sel & ~X86_SEL_RPL, 0);
5287}
5288
5289
5290/** \#GP(0) - 0d. */
5291DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5292{
5293 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5294}
5295
5296
5297/** \#GP(sel) - 0d. */
5298DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5299{
5300 NOREF(iSegReg); NOREF(fAccess);
5301 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5302 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5303}
5304
5305#ifdef IEM_WITH_SETJMP
5306/** \#GP(sel) - 0d, longjmp. */
5307DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5308{
5309 NOREF(iSegReg); NOREF(fAccess);
5310 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5311 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5312}
5313#endif
5314
5315/** \#GP(sel) - 0d. */
5316DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5317{
5318 NOREF(Sel);
5319 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5320}
5321
5322#ifdef IEM_WITH_SETJMP
5323/** \#GP(sel) - 0d, longjmp. */
5324DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5325{
5326 NOREF(Sel);
5327 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5328}
5329#endif
5330
5331
5332/** \#GP(sel) - 0d. */
5333DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5334{
5335 NOREF(iSegReg); NOREF(fAccess);
5336 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5337}
5338
5339#ifdef IEM_WITH_SETJMP
5340/** \#GP(sel) - 0d, longjmp. */
5341DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5342 uint32_t fAccess)
5343{
5344 NOREF(iSegReg); NOREF(fAccess);
5345 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5346}
5347#endif
5348
5349
5350/** \#PF(n) - 0e. */
5351DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5352{
5353 uint16_t uErr;
5354 switch (rc)
5355 {
5356 case VERR_PAGE_NOT_PRESENT:
5357 case VERR_PAGE_TABLE_NOT_PRESENT:
5358 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5359 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5360 uErr = 0;
5361 break;
5362
5363 default:
5364 AssertMsgFailed(("%Rrc\n", rc));
5365 case VERR_ACCESS_DENIED:
5366 uErr = X86_TRAP_PF_P;
5367 break;
5368
5369 /** @todo reserved */
5370 }
5371
5372 if (pVCpu->iem.s.uCpl == 3)
5373 uErr |= X86_TRAP_PF_US;
5374
5375 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5376 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5377 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5378 uErr |= X86_TRAP_PF_ID;
5379
5380#if 0 /* This is so much non-sense, really. Why was it done like that? */
5381 /* Note! RW access callers reporting a WRITE protection fault, will clear
5382 the READ flag before calling. So, read-modify-write accesses (RW)
5383 can safely be reported as READ faults. */
5384 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5385 uErr |= X86_TRAP_PF_RW;
5386#else
5387 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5388 {
5389 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5390 uErr |= X86_TRAP_PF_RW;
5391 }
5392#endif
5393
5394 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5395 uErr, GCPtrWhere);
5396}
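
/*
 * Editor's sketch (not part of the original source): how the \#PF error code
 * assembled above comes out for a common case.  The access-flag combination
 * and the local GCPtrFault are assumptions made purely for illustration.
 */
#if 0 /* documentation only - hypothetical call site inside an IEM memory worker */
    /* A ring-3 write that PGM rejected on a present page (VERR_ACCESS_DENIED)
       ends up raising \#PF with uErr = X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW. */
    return iemRaisePageFault(pVCpu, GCPtrFault, IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA, VERR_ACCESS_DENIED);
#endif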
5397
5398#ifdef IEM_WITH_SETJMP
5399/** \#PF(n) - 0e, longjmp. */
5400IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5401{
5402 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5403}
5404#endif
5405
5406
5407/** \#MF(0) - 10. */
5408DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5409{
5410 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5411}
5412
5413
5414/** \#AC(0) - 11. */
5415DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5416{
5417 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5418}
5419
5420
5421/**
5422 * Macro for calling iemCImplRaiseDivideError().
5423 *
5424 * This enables us to add/remove arguments and force different levels of
5425 * inlining as we wish.
5426 *
5427 * @return Strict VBox status code.
5428 */
5429#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5430IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5431{
5432 NOREF(cbInstr);
5433 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5434}
5435
5436
5437/**
5438 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5439 *
5440 * This enables us to add/remove arguments and force different levels of
5441 * inlining as we wish.
5442 *
5443 * @return Strict VBox status code.
5444 */
5445#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5446IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5447{
5448 NOREF(cbInstr);
5449 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5450}
5451
5452
5453/**
5454 * Macro for calling iemCImplRaiseInvalidOpcode().
5455 *
5456 * This enables us to add/remove arguments and force different levels of
5457 * inlining as we wish.
5458 *
5459 * @return Strict VBox status code.
5460 */
5461#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5462IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5463{
5464 NOREF(cbInstr);
5465 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5466}
5467
5468
5469/** @} */
5470
5471
5472/*
5473 *
5474 * Helper routines.
5475 * Helper routines.
5476 * Helper routines.
5477 *
5478 */
5479
5480/**
5481 * Recalculates the effective operand size.
5482 *
5483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5484 */
5485IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5486{
5487 switch (pVCpu->iem.s.enmCpuMode)
5488 {
5489 case IEMMODE_16BIT:
5490 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5491 break;
5492 case IEMMODE_32BIT:
5493 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5494 break;
5495 case IEMMODE_64BIT:
5496 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5497 {
5498 case 0:
5499 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5500 break;
5501 case IEM_OP_PRF_SIZE_OP:
5502 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5503 break;
5504 case IEM_OP_PRF_SIZE_REX_W:
5505 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5506 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5507 break;
5508 }
5509 break;
5510 default:
5511 AssertFailed();
5512 }
5513}
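
/*
 * Editor's note (not part of the original source): in 64-bit mode the switch
 * above resolves the prefixes as follows - REX.W always wins, a lone 66h
 * prefix selects 16-bit, and with no size prefix the default operand size is
 * used (normally 32-bit, 64-bit where iemRecalEffOpSize64Default applies):
 *
 *      (none)                                      -> enmDefOpSize
 *      IEM_OP_PRF_SIZE_OP                          -> IEMMODE_16BIT
 *      IEM_OP_PRF_SIZE_REX_W                       -> IEMMODE_64BIT
 *      IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP  -> IEMMODE_64BIT
 */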
5514
5515
5516/**
5517 * Sets the default operand size to 64-bit and recalculates the effective
5518 * operand size.
5519 *
5520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5521 */
5522IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5523{
5524 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5525 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5526 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5527 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5528 else
5529 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5530}
5531
5532
5533/*
5534 *
5535 * Common opcode decoders.
5536 * Common opcode decoders.
5537 * Common opcode decoders.
5538 *
5539 */
5540//#include <iprt/mem.h>
5541
5542/**
5543 * Used to add extra details about a stub case.
5544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5545 */
5546IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5547{
5548#if defined(LOG_ENABLED) && defined(IN_RING3)
5549 PVM pVM = pVCpu->CTX_SUFF(pVM);
5550 char szRegs[4096];
5551 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5552 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5553 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5554 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5555 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5556 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5557 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5558 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5559 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5560 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5561 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5562 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5563 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5564 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5565 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5566 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5567 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5568 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5569 " efer=%016VR{efer}\n"
5570 " pat=%016VR{pat}\n"
5571 " sf_mask=%016VR{sf_mask}\n"
5572 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5573 " lstar=%016VR{lstar}\n"
5574 " star=%016VR{star} cstar=%016VR{cstar}\n"
5575 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5576 );
5577
5578 char szInstr[256];
5579 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5580 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5581 szInstr, sizeof(szInstr), NULL);
5582
5583 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5584#else
5585 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5586#endif
5587}
5588
5589/**
5590 * Complains about a stub.
5591 *
5592 * Providing two versions of this macro, one for daily use and one for use when
5593 * working on IEM.
5594 */
5595#if 0
5596# define IEMOP_BITCH_ABOUT_STUB() \
5597 do { \
5598 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5599 iemOpStubMsg2(pVCpu); \
5600 RTAssertPanic(); \
5601 } while (0)
5602#else
5603# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5604#endif
5605
5606/** Stubs an opcode. */
5607#define FNIEMOP_STUB(a_Name) \
5608 FNIEMOP_DEF(a_Name) \
5609 { \
5610 IEMOP_BITCH_ABOUT_STUB(); \
5611 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5612 } \
5613 typedef int ignore_semicolon
5614
5615/** Stubs an opcode. */
5616#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5617 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5618 { \
5619 IEMOP_BITCH_ABOUT_STUB(); \
5620 NOREF(a_Name0); \
5621 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5622 } \
5623 typedef int ignore_semicolon
5624
5625/** Stubs an opcode which currently should raise \#UD. */
5626#define FNIEMOP_UD_STUB(a_Name) \
5627 FNIEMOP_DEF(a_Name) \
5628 { \
5629 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5630 return IEMOP_RAISE_INVALID_OPCODE(); \
5631 } \
5632 typedef int ignore_semicolon
5633
5634/** Stubs an opcode which currently should raise \#UD. */
5635#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5636 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5637 { \
5638 NOREF(a_Name0); \
5639 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5640 return IEMOP_RAISE_INVALID_OPCODE(); \
5641 } \
5642 typedef int ignore_semicolon
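
/*
 * Editor's sketch (not part of the original source): typical use of the stub
 * macros above.  The handler names are hypothetical.
 */
#if 0 /* documentation only */
/** Not implemented yet: logs a stub message and returns VERR_IEM_INSTR_NOT_IMPLEMENTED. */
FNIEMOP_STUB(iemOp_SomeUnimplementedOpcode);
/** Currently undefined: logs and raises \#UD via IEMOP_RAISE_INVALID_OPCODE(). */
FNIEMOP_UD_STUB(iemOp_SomeUndefinedOpcode);
#endif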
5643
5644
5645
5646/** @name Register Access.
5647 * @{
5648 */
5649
5650/**
5651 * Gets a reference (pointer) to the specified hidden segment register.
5652 *
5653 * @returns Hidden register reference.
5654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5655 * @param iSegReg The segment register.
5656 */
5657IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5658{
5659 Assert(iSegReg < X86_SREG_COUNT);
5660 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5661 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5662
5663#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5664 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5665 { /* likely */ }
5666 else
5667 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5668#else
5669 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5670#endif
5671 return pSReg;
5672}
5673
5674
5675/**
5676 * Ensures that the given hidden segment register is up to date.
5677 *
5678 * @returns Hidden register reference.
5679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5680 * @param pSReg The segment register.
5681 */
5682IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5683{
5684#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5685 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5686 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5687#else
5688 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5689 NOREF(pVCpu);
5690#endif
5691 return pSReg;
5692}
5693
5694
5695/**
5696 * Gets a reference (pointer) to the specified segment register (the selector
5697 * value).
5698 *
5699 * @returns Pointer to the selector variable.
5700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5701 * @param iSegReg The segment register.
5702 */
5703DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5704{
5705 Assert(iSegReg < X86_SREG_COUNT);
5706 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5707 return &pCtx->aSRegs[iSegReg].Sel;
5708}
5709
5710
5711/**
5712 * Fetches the selector value of a segment register.
5713 *
5714 * @returns The selector value.
5715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5716 * @param iSegReg The segment register.
5717 */
5718DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5719{
5720 Assert(iSegReg < X86_SREG_COUNT);
5721 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5722}
5723
5724
5725/**
5726 * Gets a reference (pointer) to the specified general purpose register.
5727 *
5728 * @returns Register reference.
5729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5730 * @param iReg The general purpose register.
5731 */
5732DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5733{
5734 Assert(iReg < 16);
5735 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5736 return &pCtx->aGRegs[iReg];
5737}
5738
5739
5740/**
5741 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5742 *
5743 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5744 *
5745 * @returns Register reference.
5746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5747 * @param iReg The register.
5748 */
5749DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5750{
5751 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5752 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5753 {
5754 Assert(iReg < 16);
5755 return &pCtx->aGRegs[iReg].u8;
5756 }
5757 /* high 8-bit register. */
5758 Assert(iReg < 8);
5759 return &pCtx->aGRegs[iReg & 3].bHi;
5760}
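
/*
 * Editor's note (not part of the original source): a quick illustration of
 * the branch above - register numbers 4 thru 7 resolve to SPL/BPL/SIL/DIL
 * when any REX prefix is present, and to the legacy high-byte registers
 * AH/CH/DH/BH (&aGRegs[iReg & 3].bHi) when it is not.
 */
#if 0 /* documentation only - inside some IEM worker with pVCpu in scope */
    uint8_t *pbReg = iemGRegRefU8(pVCpu, 4); /* AH without a REX prefix, SPL with one. */
#endif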
5761
5762
5763/**
5764 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5765 *
5766 * @returns Register reference.
5767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5768 * @param iReg The register.
5769 */
5770DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5771{
5772 Assert(iReg < 16);
5773 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5774 return &pCtx->aGRegs[iReg].u16;
5775}
5776
5777
5778/**
5779 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5780 *
5781 * @returns Register reference.
5782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5783 * @param iReg The register.
5784 */
5785DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5786{
5787 Assert(iReg < 16);
5788 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5789 return &pCtx->aGRegs[iReg].u32;
5790}
5791
5792
5793/**
5794 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5795 *
5796 * @returns Register reference.
5797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5798 * @param iReg The register.
5799 */
5800DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5801{
5802    Assert(iReg < 16);
5803 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5804 return &pCtx->aGRegs[iReg].u64;
5805}
5806
5807
5808/**
5809 * Fetches the value of an 8-bit general purpose register.
5810 *
5811 * @returns The register value.
5812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5813 * @param iReg The register.
5814 */
5815DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5816{
5817 return *iemGRegRefU8(pVCpu, iReg);
5818}
5819
5820
5821/**
5822 * Fetches the value of a 16-bit general purpose register.
5823 *
5824 * @returns The register value.
5825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5826 * @param iReg The register.
5827 */
5828DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5829{
5830 Assert(iReg < 16);
5831 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5832}
5833
5834
5835/**
5836 * Fetches the value of a 32-bit general purpose register.
5837 *
5838 * @returns The register value.
5839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5840 * @param iReg The register.
5841 */
5842DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5843{
5844 Assert(iReg < 16);
5845 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5846}
5847
5848
5849/**
5850 * Fetches the value of a 64-bit general purpose register.
5851 *
5852 * @returns The register value.
5853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5854 * @param iReg The register.
5855 */
5856DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5857{
5858 Assert(iReg < 16);
5859 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5860}
5861
5862
5863/**
5864 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5865 *
5866 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5867 * segment limit.
5868 *
5869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5870 * @param offNextInstr The offset of the next instruction.
5871 */
5872IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5873{
5874 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5875 switch (pVCpu->iem.s.enmEffOpSize)
5876 {
5877 case IEMMODE_16BIT:
5878 {
5879 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5880 if ( uNewIp > pCtx->cs.u32Limit
5881 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5882 return iemRaiseGeneralProtectionFault0(pVCpu);
5883 pCtx->rip = uNewIp;
5884 break;
5885 }
5886
5887 case IEMMODE_32BIT:
5888 {
5889 Assert(pCtx->rip <= UINT32_MAX);
5890 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5891
5892 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5893 if (uNewEip > pCtx->cs.u32Limit)
5894 return iemRaiseGeneralProtectionFault0(pVCpu);
5895 pCtx->rip = uNewEip;
5896 break;
5897 }
5898
5899 case IEMMODE_64BIT:
5900 {
5901 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5902
5903 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5904 if (!IEM_IS_CANONICAL(uNewRip))
5905 return iemRaiseGeneralProtectionFault0(pVCpu);
5906 pCtx->rip = uNewRip;
5907 break;
5908 }
5909
5910 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5911 }
5912
5913 pCtx->eflags.Bits.u1RF = 0;
5914
5915#ifndef IEM_WITH_CODE_TLB
5916 /* Flush the prefetch buffer. */
5917 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5918#endif
5919
5920 return VINF_SUCCESS;
5921}
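
/*
 * Editor's note (not part of the original source): the new IP is computed as
 * current IP + instruction length + sign-extended offset.  For example, the
 * two-byte instruction EB FE (JMP short with rel8 = -2) at IP=0x0100 yields
 * 0x0100 + 2 + (-2) = 0x0100, i.e. it jumps to itself.
 */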
5922
5923
5924/**
5925 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5926 *
5927 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5928 * segment limit.
5929 *
5930 * @returns Strict VBox status code.
5931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5932 * @param offNextInstr The offset of the next instruction.
5933 */
5934IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5935{
5936 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5937 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5938
5939 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5940 if ( uNewIp > pCtx->cs.u32Limit
5941 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5942 return iemRaiseGeneralProtectionFault0(pVCpu);
5943 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5944 pCtx->rip = uNewIp;
5945 pCtx->eflags.Bits.u1RF = 0;
5946
5947#ifndef IEM_WITH_CODE_TLB
5948 /* Flush the prefetch buffer. */
5949 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5950#endif
5951
5952 return VINF_SUCCESS;
5953}
5954
5955
5956/**
5957 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5958 *
5959 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5960 * segment limit.
5961 *
5962 * @returns Strict VBox status code.
5963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5964 * @param offNextInstr The offset of the next instruction.
5965 */
5966IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
5967{
5968 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5969 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
5970
5971 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
5972 {
5973 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5974
5975 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5976 if (uNewEip > pCtx->cs.u32Limit)
5977 return iemRaiseGeneralProtectionFault0(pVCpu);
5978 pCtx->rip = uNewEip;
5979 }
5980 else
5981 {
5982 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5983
5984 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5985 if (!IEM_IS_CANONICAL(uNewRip))
5986 return iemRaiseGeneralProtectionFault0(pVCpu);
5987 pCtx->rip = uNewRip;
5988 }
5989 pCtx->eflags.Bits.u1RF = 0;
5990
5991#ifndef IEM_WITH_CODE_TLB
5992 /* Flush the prefetch buffer. */
5993 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5994#endif
5995
5996 return VINF_SUCCESS;
5997}
5998
5999
6000/**
6001 * Performs a near jump to the specified address.
6002 *
6003 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6004 * segment limit.
6005 *
6006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6007 * @param uNewRip The new RIP value.
6008 */
6009IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6010{
6011 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6012 switch (pVCpu->iem.s.enmEffOpSize)
6013 {
6014 case IEMMODE_16BIT:
6015 {
6016 Assert(uNewRip <= UINT16_MAX);
6017 if ( uNewRip > pCtx->cs.u32Limit
6018 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6019 return iemRaiseGeneralProtectionFault0(pVCpu);
6020 /** @todo Test 16-bit jump in 64-bit mode. */
6021 pCtx->rip = uNewRip;
6022 break;
6023 }
6024
6025 case IEMMODE_32BIT:
6026 {
6027 Assert(uNewRip <= UINT32_MAX);
6028 Assert(pCtx->rip <= UINT32_MAX);
6029 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6030
6031 if (uNewRip > pCtx->cs.u32Limit)
6032 return iemRaiseGeneralProtectionFault0(pVCpu);
6033 pCtx->rip = uNewRip;
6034 break;
6035 }
6036
6037 case IEMMODE_64BIT:
6038 {
6039 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6040
6041 if (!IEM_IS_CANONICAL(uNewRip))
6042 return iemRaiseGeneralProtectionFault0(pVCpu);
6043 pCtx->rip = uNewRip;
6044 break;
6045 }
6046
6047 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6048 }
6049
6050 pCtx->eflags.Bits.u1RF = 0;
6051
6052#ifndef IEM_WITH_CODE_TLB
6053 /* Flush the prefetch buffer. */
6054 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6055#endif
6056
6057 return VINF_SUCCESS;
6058}
6059
6060
6061/**
6062 * Gets the address of the top of the stack.
6063 *
6064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6065 * @param pCtx The CPU context which SP/ESP/RSP should be
6066 * read.
6067 */
6068DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6069{
6070 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6071 return pCtx->rsp;
6072 if (pCtx->ss.Attr.n.u1DefBig)
6073 return pCtx->esp;
6074 return pCtx->sp;
6075}
6076
6077
6078/**
6079 * Updates the RIP/EIP/IP to point to the next instruction.
6080 *
6081 * This function leaves the EFLAGS.RF flag alone.
6082 *
6083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6084 * @param cbInstr The number of bytes to add.
6085 */
6086IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6087{
6088 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6089 switch (pVCpu->iem.s.enmCpuMode)
6090 {
6091 case IEMMODE_16BIT:
6092 Assert(pCtx->rip <= UINT16_MAX);
6093 pCtx->eip += cbInstr;
6094 pCtx->eip &= UINT32_C(0xffff);
6095 break;
6096
6097 case IEMMODE_32BIT:
6098 pCtx->eip += cbInstr;
6099 Assert(pCtx->rip <= UINT32_MAX);
6100 break;
6101
6102 case IEMMODE_64BIT:
6103 pCtx->rip += cbInstr;
6104 break;
6105 default: AssertFailed();
6106 }
6107}
6108
6109
6110#if 0
6111/**
6112 * Updates the RIP/EIP/IP to point to the next instruction.
6113 *
6114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6115 */
6116IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6117{
6118 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6119}
6120#endif
6121
6122
6123
6124/**
6125 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6126 *
6127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6128 * @param cbInstr The number of bytes to add.
6129 */
6130IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6131{
6132 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6133
6134 pCtx->eflags.Bits.u1RF = 0;
6135
6136 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6137#if ARCH_BITS >= 64
6138 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6139 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6140 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6141#else
6142 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6143 pCtx->rip += cbInstr;
6144 else
6145 {
6146 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6147 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6148 }
6149#endif
6150}
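
/*
 * Editor's note (not part of the original source): the mask table above makes
 * the instruction pointer wrap within its natural width.  In 16-bit mode, for
 * instance, IP=0xFFFF advanced by a 2-byte instruction becomes
 * (0xFFFF + 2) & 0xFFFF = 0x0001, leaving the upper RIP bits zero.
 */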
6151
6152
6153/**
6154 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6155 *
6156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6157 */
6158IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6159{
6160 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6161}
6162
6163
6164/**
6165 * Adds to the stack pointer.
6166 *
6167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6168 * @param pCtx The CPU context which SP/ESP/RSP should be
6169 * updated.
6170 * @param cbToAdd The number of bytes to add (8-bit!).
6171 */
6172DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6173{
6174 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6175 pCtx->rsp += cbToAdd;
6176 else if (pCtx->ss.Attr.n.u1DefBig)
6177 pCtx->esp += cbToAdd;
6178 else
6179 pCtx->sp += cbToAdd;
6180}
6181
6182
6183/**
6184 * Subtracts from the stack pointer.
6185 *
6186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6187 * @param pCtx The CPU context which SP/ESP/RSP should be
6188 * updated.
6189 * @param cbToSub The number of bytes to subtract (8-bit!).
6190 */
6191DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6192{
6193 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6194 pCtx->rsp -= cbToSub;
6195 else if (pCtx->ss.Attr.n.u1DefBig)
6196 pCtx->esp -= cbToSub;
6197 else
6198 pCtx->sp -= cbToSub;
6199}
6200
6201
6202/**
6203 * Adds to the temporary stack pointer.
6204 *
6205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6206 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6207 * @param cbToAdd The number of bytes to add (16-bit).
6208 * @param pCtx Where to get the current stack mode.
6209 */
6210DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6211{
6212 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6213 pTmpRsp->u += cbToAdd;
6214 else if (pCtx->ss.Attr.n.u1DefBig)
6215 pTmpRsp->DWords.dw0 += cbToAdd;
6216 else
6217 pTmpRsp->Words.w0 += cbToAdd;
6218}
6219
6220
6221/**
6222 * Subtracts from the temporary stack pointer.
6223 *
6224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6225 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6226 * @param cbToSub The number of bytes to subtract.
6227 * @param pCtx Where to get the current stack mode.
6228 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6229 * expecting that.
6230 */
6231DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6232{
6233 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6234 pTmpRsp->u -= cbToSub;
6235 else if (pCtx->ss.Attr.n.u1DefBig)
6236 pTmpRsp->DWords.dw0 -= cbToSub;
6237 else
6238 pTmpRsp->Words.w0 -= cbToSub;
6239}
6240
6241
6242/**
6243 * Calculates the effective stack address for a push of the specified size as
6244 * well as the new RSP value (upper bits may be masked).
6245 *
6246 * @returns Effective stack address for the push.
6247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6248 * @param pCtx Where to get the current stack mode.
6249 * @param   cbItem              The size of the stack item to push.
6250 * @param puNewRsp Where to return the new RSP value.
6251 */
6252DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6253{
6254 RTUINT64U uTmpRsp;
6255 RTGCPTR GCPtrTop;
6256 uTmpRsp.u = pCtx->rsp;
6257
6258 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6259 GCPtrTop = uTmpRsp.u -= cbItem;
6260 else if (pCtx->ss.Attr.n.u1DefBig)
6261 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6262 else
6263 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6264 *puNewRsp = uTmpRsp.u;
6265 return GCPtrTop;
6266}
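
/*
 * Editor's note (not part of the original source): only the part of RSP that
 * the current stack attributes make addressable is decremented.  With a
 * 16-bit SS (ss.Attr.n.u1DefBig clear) and SP=0x0002, pushing a 4-byte item
 * wraps to GCPtrTop=0xFFFE while the upper bits of the returned *puNewRsp
 * stay untouched.
 */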
6267
6268
6269/**
6270 * Gets the current stack pointer and calculates the value after a pop of the
6271 * specified size.
6272 *
6273 * @returns Current stack pointer.
6274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6275 * @param pCtx Where to get the current stack mode.
6276 * @param cbItem The size of the stack item to pop.
6277 * @param puNewRsp Where to return the new RSP value.
6278 */
6279DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6280{
6281 RTUINT64U uTmpRsp;
6282 RTGCPTR GCPtrTop;
6283 uTmpRsp.u = pCtx->rsp;
6284
6285 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6286 {
6287 GCPtrTop = uTmpRsp.u;
6288 uTmpRsp.u += cbItem;
6289 }
6290 else if (pCtx->ss.Attr.n.u1DefBig)
6291 {
6292 GCPtrTop = uTmpRsp.DWords.dw0;
6293 uTmpRsp.DWords.dw0 += cbItem;
6294 }
6295 else
6296 {
6297 GCPtrTop = uTmpRsp.Words.w0;
6298 uTmpRsp.Words.w0 += cbItem;
6299 }
6300 *puNewRsp = uTmpRsp.u;
6301 return GCPtrTop;
6302}
6303
6304
6305/**
6306 * Calculates the effective stack address for a push of the specified size as
6307 * well as the new temporary RSP value (upper bits may be masked).
6308 *
6309 * @returns Effective stack address for the push.
6310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6311 * @param pCtx Where to get the current stack mode.
6312 * @param pTmpRsp The temporary stack pointer. This is updated.
6313 * @param   cbItem              The size of the stack item to push.
6314 */
6315DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6316{
6317 RTGCPTR GCPtrTop;
6318
6319 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6320 GCPtrTop = pTmpRsp->u -= cbItem;
6321 else if (pCtx->ss.Attr.n.u1DefBig)
6322 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6323 else
6324 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6325 return GCPtrTop;
6326}
6327
6328
6329/**
6330 * Gets the effective stack address for a pop of the specified size and
6331 * calculates and updates the temporary RSP.
6332 *
6333 * @returns Current stack pointer.
6334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6335 * @param pCtx Where to get the current stack mode.
6336 * @param pTmpRsp The temporary stack pointer. This is updated.
6337 * @param cbItem The size of the stack item to pop.
6338 */
6339DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6340{
6341 RTGCPTR GCPtrTop;
6342 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6343 {
6344 GCPtrTop = pTmpRsp->u;
6345 pTmpRsp->u += cbItem;
6346 }
6347 else if (pCtx->ss.Attr.n.u1DefBig)
6348 {
6349 GCPtrTop = pTmpRsp->DWords.dw0;
6350 pTmpRsp->DWords.dw0 += cbItem;
6351 }
6352 else
6353 {
6354 GCPtrTop = pTmpRsp->Words.w0;
6355 pTmpRsp->Words.w0 += cbItem;
6356 }
6357 return GCPtrTop;
6358}
6359
6360/** @} */
6361
6362
6363/** @name FPU access and helpers.
6364 *
6365 * @{
6366 */
6367
6368
6369/**
6370 * Hook for preparing to use the host FPU.
6371 *
6372 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6373 *
6374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6375 */
6376DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6377{
6378#ifdef IN_RING3
6379 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6380#else
6381 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6382#endif
6383}
6384
6385
6386/**
6387 * Hook for preparing to use the host FPU for SSE.
6388 *
6389 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6390 *
6391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6392 */
6393DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6394{
6395 iemFpuPrepareUsage(pVCpu);
6396}
6397
6398
6399/**
6400 * Hook for actualizing the guest FPU state before the interpreter reads it.
6401 *
6402 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6403 *
6404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6405 */
6406DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6407{
6408#ifdef IN_RING3
6409 NOREF(pVCpu);
6410#else
6411 CPUMRZFpuStateActualizeForRead(pVCpu);
6412#endif
6413}
6414
6415
6416/**
6417 * Hook for actualizing the guest FPU state before the interpreter changes it.
6418 *
6419 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6420 *
6421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6422 */
6423DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6424{
6425#ifdef IN_RING3
6426 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6427#else
6428 CPUMRZFpuStateActualizeForChange(pVCpu);
6429#endif
6430}
6431
6432
6433/**
6434 * Hook for actualizing the guest XMM0..15 register state for read only.
6435 *
6436 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6437 *
6438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6439 */
6440DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6441{
6442#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6443 NOREF(pVCpu);
6444#else
6445 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6446#endif
6447}
6448
6449
6450/**
6451 * Hook for actualizing the guest XMM0..15 register state for read+write.
6452 *
6453 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6454 *
6455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6456 */
6457DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6458{
6459#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6460 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6461#else
6462 CPUMRZFpuStateActualizeForChange(pVCpu);
6463#endif
6464}
6465
6466
6467/**
6468 * Stores a QNaN value into a FPU register.
6469 *
6470 * @param pReg Pointer to the register.
6471 */
6472DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6473{
6474 pReg->au32[0] = UINT32_C(0x00000000);
6475 pReg->au32[1] = UINT32_C(0xc0000000);
6476 pReg->au16[4] = UINT16_C(0xffff);
6477}
6478
6479
6480/**
6481 * Updates the FOP, FPU.CS and FPUIP registers.
6482 *
6483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6484 * @param pCtx The CPU context.
6485 * @param pFpuCtx The FPU context.
6486 */
6487DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6488{
6489 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6490 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6491    /** @todo x87.CS and FPUIP need to be kept separately. */
6492 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6493 {
6494        /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
6495         *        handled in real mode, based on the fnsave and fnstenv images. */
6496 pFpuCtx->CS = 0;
6497 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6498 }
6499 else
6500 {
6501 pFpuCtx->CS = pCtx->cs.Sel;
6502 pFpuCtx->FPUIP = pCtx->rip;
6503 }
6504}
6505
6506
6507/**
6508 * Updates the x87.DS and FPUDP registers.
6509 *
6510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6511 * @param pCtx The CPU context.
6512 * @param pFpuCtx The FPU context.
6513 * @param iEffSeg The effective segment register.
6514 * @param GCPtrEff The effective address relative to @a iEffSeg.
6515 */
6516DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6517{
6518 RTSEL sel;
6519 switch (iEffSeg)
6520 {
6521 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6522 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6523 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6524 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6525 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6526 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6527 default:
6528 AssertMsgFailed(("%d\n", iEffSeg));
6529 sel = pCtx->ds.Sel;
6530 }
6531    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6532 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6533 {
6534 pFpuCtx->DS = 0;
6535 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6536 }
6537 else
6538 {
6539 pFpuCtx->DS = sel;
6540 pFpuCtx->FPUDP = GCPtrEff;
6541 }
6542}
6543
6544
6545/**
6546 * Rotates the stack registers in the push direction.
6547 *
6548 * @param pFpuCtx The FPU context.
6549 * @remarks This is a complete waste of time, but fxsave stores the registers in
6550 * stack order.
6551 */
6552DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6553{
6554 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6555 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6556 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6557 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6558 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6559 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6560 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6561 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6562 pFpuCtx->aRegs[0].r80 = r80Tmp;
6563}
6564
6565
6566/**
6567 * Rotates the stack registers in the pop direction.
6568 *
6569 * @param pFpuCtx The FPU context.
6570 * @remarks This is a complete waste of time, but fxsave stores the registers in
6571 * stack order.
6572 */
6573DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6574{
6575 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6576 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6577 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6578 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6579 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6580 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6581 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6582 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6583 pFpuCtx->aRegs[7].r80 = r80Tmp;
6584}
6585
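
/*
 * Illustrative note (not part of the original source): the aRegs[] array is kept
 * in ST(i) order, which is why a push or pop is a physical rotation here, while
 * FTW and TOP always deal in absolute register numbers.  The mapping used by the
 * helpers below boils down to this (example function name made up):
 *
 * @code
 *  // Absolute x87 register backing ST(iStReg) for a given FSW.TOP value (0..7).
 *  static unsigned ExampleStRegToPhysReg(unsigned uTop, unsigned iStReg)
 *  {
 *      return (uTop + iStReg) & 7;     // same arithmetic as (TOP + i) & X86_FSW_TOP_SMASK
 *  }
 * @endcode
 *
 * E.g. with TOP=6, ST(0) is physical register 6 and ST(3) is physical register 1.
 */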
6586
6587/**
6588 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6589 * exception prevents it.
6590 *
6591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6592 * @param pResult The FPU operation result to push.
6593 * @param pFpuCtx The FPU context.
6594 */
6595IEM_STATIC void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6596{
6597 /* Update FSW and bail if there are pending exceptions afterwards. */
6598 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6599 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6600 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6601 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6602 {
6603 pFpuCtx->FSW = fFsw;
6604 return;
6605 }
6606
6607 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6608 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6609 {
6610 /* All is fine, push the actual value. */
6611 pFpuCtx->FTW |= RT_BIT(iNewTop);
6612 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6613 }
6614 else if (pFpuCtx->FCW & X86_FCW_IM)
6615 {
6616 /* Masked stack overflow, push QNaN. */
6617 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6618 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6619 }
6620 else
6621 {
6622 /* Raise stack overflow, don't push anything. */
6623 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6624 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6625 return;
6626 }
6627
6628 fFsw &= ~X86_FSW_TOP_MASK;
6629 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6630 pFpuCtx->FSW = fFsw;
6631
6632 iemFpuRotateStackPush(pFpuCtx);
6633}
6634
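
/*
 * Illustrative note (not part of the original source): the
 * (TOP + 7) & X86_FSW_TOP_SMASK step above is simply TOP - 1 modulo 8, i.e. the
 * register slot a push would land in.  A quick standalone check (example
 * function name made up):
 *
 * @code
 *  #include <assert.h>
 *
 *  static void ExampleCheckTopDecrement(void)
 *  {
 *      for (unsigned uTop = 0; uTop < 8; uTop++)
 *          assert(((uTop + 7) & 7) == ((uTop + 8 - 1) % 8));   // wraps 0 -> 7
 *  }
 * @endcode
 */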
6635
6636/**
6637 * Stores a result in a FPU register and updates the FSW and FTW.
6638 *
6639 * @param pFpuCtx The FPU context.
6640 * @param pResult The result to store.
6641 * @param iStReg Which FPU register to store it in.
6642 */
6643IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6644{
6645 Assert(iStReg < 8);
6646 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6647 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6648 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6649 pFpuCtx->FTW |= RT_BIT(iReg);
6650 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6651}
6652
6653
6654/**
6655 * Only updates the FPU status word (FSW) with the result of the current
6656 * instruction.
6657 *
6658 * @param pFpuCtx The FPU context.
6659 * @param u16FSW The FSW output of the current instruction.
6660 */
6661IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6662{
6663 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6664 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6665}
6666
6667
6668/**
6669 * Pops one item off the FPU stack if no pending exception prevents it.
6670 *
6671 * @param pFpuCtx The FPU context.
6672 */
6673IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6674{
6675 /* Check pending exceptions. */
6676 uint16_t uFSW = pFpuCtx->FSW;
6677 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6678 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6679 return;
6680
6681 /* TOP--. */
6682 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6683 uFSW &= ~X86_FSW_TOP_MASK;
6684 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6685 pFpuCtx->FSW = uFSW;
6686
6687 /* Mark the previous ST0 as empty. */
6688 iOldTop >>= X86_FSW_TOP_SHIFT;
6689 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6690
6691 /* Rotate the registers. */
6692 iemFpuRotateStackPop(pFpuCtx);
6693}
6694
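
/*
 * Illustrative note (not part of the original source): adding
 * 9 << X86_FSW_TOP_SHIFT and masking, as done above, increments the 3-bit TOP
 * field by one modulo 8 without first extracting it (9 & 7 == 1).  Standalone
 * sketch with the shift written out as the constant 11 (FSW.TOP is bits 11..13):
 *
 * @code
 *  #include <assert.h>
 *
 *  static void ExampleCheckTopIncrementInPlace(void)
 *  {
 *      unsigned const cShift = 11, fMask = 7u << cShift;
 *      for (unsigned uTop = 0; uTop < 8; uTop++)
 *      {
 *          unsigned const uOld = uTop << cShift;
 *          unsigned const uNew = (uOld + (9u << cShift)) & fMask;
 *          assert((uNew >> cShift) == ((uTop + 1) & 7));
 *      }
 *  }
 * @endcode
 */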
6695
6696/**
6697 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6698 *
6699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6700 * @param pResult The FPU operation result to push.
6701 */
6702IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6703{
6704 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6705 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6706 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6707 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6708}
6709
6710
6711/**
6712 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6713 * and sets FPUDP and FPUDS.
6714 *
6715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6716 * @param pResult The FPU operation result to push.
6717 * @param iEffSeg The effective segment register.
6718 * @param GCPtrEff The effective address relative to @a iEffSeg.
6719 */
6720IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6721{
6722 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6723 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6724 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6725 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6726 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6727}
6728
6729
6730/**
6731 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6732 * unless a pending exception prevents it.
6733 *
6734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6735 * @param pResult The FPU operation result to store and push.
6736 */
6737IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6738{
6739 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6740 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6741 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6742
6743 /* Update FSW and bail if there are pending exceptions afterwards. */
6744 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6745 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6746 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6747 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6748 {
6749 pFpuCtx->FSW = fFsw;
6750 return;
6751 }
6752
6753 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6754 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6755 {
6756 /* All is fine, push the actual value. */
6757 pFpuCtx->FTW |= RT_BIT(iNewTop);
6758 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6759 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6760 }
6761 else if (pFpuCtx->FCW & X86_FCW_IM)
6762 {
6763 /* Masked stack overflow, push QNaN. */
6764 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6765 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6766 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6767 }
6768 else
6769 {
6770 /* Raise stack overflow, don't push anything. */
6771 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6772 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6773 return;
6774 }
6775
6776 fFsw &= ~X86_FSW_TOP_MASK;
6777 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6778 pFpuCtx->FSW = fFsw;
6779
6780 iemFpuRotateStackPush(pFpuCtx);
6781}
6782
6783
6784/**
6785 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6786 * FOP.
6787 *
6788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6789 * @param pResult The result to store.
6790 * @param iStReg Which FPU register to store it in.
6791 */
6792IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6793{
6794 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6795 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6796 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6797 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6798}
6799
6800
6801/**
6802 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6803 * FOP, and then pops the stack.
6804 *
6805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6806 * @param pResult The result to store.
6807 * @param iStReg Which FPU register to store it in.
6808 */
6809IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6810{
6811 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6812 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6813 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6814 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6815 iemFpuMaybePopOne(pFpuCtx);
6816}
6817
6818
6819/**
6820 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6821 * FPUDP, and FPUDS.
6822 *
6823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6824 * @param pResult The result to store.
6825 * @param iStReg Which FPU register to store it in.
6826 * @param iEffSeg The effective memory operand selector register.
6827 * @param GCPtrEff The effective memory operand offset.
6828 */
6829IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6830 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6831{
6832 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6833 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6834 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6835 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6836 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6837}
6838
6839
6840/**
6841 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6842 * FPUDP, and FPUDS, and then pops the stack.
6843 *
6844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6845 * @param pResult The result to store.
6846 * @param iStReg Which FPU register to store it in.
6847 * @param iEffSeg The effective memory operand selector register.
6848 * @param GCPtrEff The effective memory operand offset.
6849 */
6850IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6851 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6852{
6853 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6854 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6855 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6856 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6857 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6858 iemFpuMaybePopOne(pFpuCtx);
6859}
6860
6861
6862/**
6863 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6864 *
6865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6866 */
6867IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6868{
6869 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6870 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6871 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6872}
6873
6874
6875/**
6876 * Marks the specified stack register as free (for FFREE).
6877 *
6878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6879 * @param iStReg The register to free.
6880 */
6881IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6882{
6883 Assert(iStReg < 8);
6884 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6885 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6886 pFpuCtx->FTW &= ~RT_BIT(iReg);
6887}
6888
6889
6890/**
6891 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6892 *
6893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6894 */
6895IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6896{
6897 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6898 uint16_t uFsw = pFpuCtx->FSW;
6899 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6900 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6901 uFsw &= ~X86_FSW_TOP_MASK;
6902 uFsw |= uTop;
6903 pFpuCtx->FSW = uFsw;
6904}
6905
6906
6907/**
6908 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6909 *
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 */
6912IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6913{
6914 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6915 uint16_t uFsw = pFpuCtx->FSW;
6916 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6917 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6918 uFsw &= ~X86_FSW_TOP_MASK;
6919 uFsw |= uTop;
6920 pFpuCtx->FSW = uFsw;
6921}
6922
6923
6924/**
6925 * Updates the FSW, FOP, FPUIP, and FPUCS.
6926 *
6927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6928 * @param u16FSW The FSW from the current instruction.
6929 */
6930IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6931{
6932 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6933 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6934 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6935 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6936}
6937
6938
6939/**
6940 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6941 *
6942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6943 * @param u16FSW The FSW from the current instruction.
6944 */
6945IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6946{
6947 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6948 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6949 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6950 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6951 iemFpuMaybePopOne(pFpuCtx);
6952}
6953
6954
6955/**
6956 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6957 *
6958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6959 * @param u16FSW The FSW from the current instruction.
6960 * @param iEffSeg The effective memory operand selector register.
6961 * @param GCPtrEff The effective memory operand offset.
6962 */
6963IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6964{
6965 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6966 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6967 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6968 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6969 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6970}
6971
6972
6973/**
6974 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
6975 *
6976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6977 * @param u16FSW The FSW from the current instruction.
6978 */
6979IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
6980{
6981 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6982 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6983 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6984 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6985 iemFpuMaybePopOne(pFpuCtx);
6986 iemFpuMaybePopOne(pFpuCtx);
6987}
6988
6989
6990/**
6991 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
6992 *
6993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6994 * @param u16FSW The FSW from the current instruction.
6995 * @param iEffSeg The effective memory operand selector register.
6996 * @param GCPtrEff The effective memory operand offset.
6997 */
6998IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6999{
7000 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7001 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7002 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7003 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7004 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7005 iemFpuMaybePopOne(pFpuCtx);
7006}
7007
7008
7009/**
7010 * Worker routine for raising an FPU stack underflow exception.
7011 *
7012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7013 * @param pFpuCtx The FPU context.
7014 * @param iStReg The stack register being accessed.
7015 */
7016IEM_STATIC void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
7017{
7018 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7019 if (pFpuCtx->FCW & X86_FCW_IM)
7020 {
7021 /* Masked underflow. */
7022 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7023 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7024 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7025 if (iStReg != UINT8_MAX)
7026 {
7027 pFpuCtx->FTW |= RT_BIT(iReg);
7028 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7029 }
7030 }
7031 else
7032 {
7033 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7034 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7035 }
7036}
7037
7038
7039/**
7040 * Raises a FPU stack underflow exception.
7041 *
7042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7043 * @param iStReg The destination register that should be loaded
7044 *                      with QNaN if \#IS is masked. Specify
7045 * UINT8_MAX if none (like for fcom).
7046 */
7047DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7048{
7049 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7050 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7051 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7052 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
7053}
7054
7055
7056DECL_NO_INLINE(IEM_STATIC, void)
7057iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7058{
7059 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7060 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7061 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7062 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7063 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
7064}
7065
7066
7067DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7068{
7069 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7070 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7071 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7072 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
7073 iemFpuMaybePopOne(pFpuCtx);
7074}
7075
7076
7077DECL_NO_INLINE(IEM_STATIC, void)
7078iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7079{
7080 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7081 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7082 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7083 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7084 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
7085 iemFpuMaybePopOne(pFpuCtx);
7086}
7087
7088
7089DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7090{
7091 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7092 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7093 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7094 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
7095 iemFpuMaybePopOne(pFpuCtx);
7096 iemFpuMaybePopOne(pFpuCtx);
7097}
7098
7099
7100DECL_NO_INLINE(IEM_STATIC, void)
7101iemFpuStackPushUnderflow(PVMCPU pVCpu)
7102{
7103 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7104 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7105 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7106
7107 if (pFpuCtx->FCW & X86_FCW_IM)
7108 {
7109        /* Masked underflow - Push QNaN. */
7110 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7111 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7112 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7113 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7114 pFpuCtx->FTW |= RT_BIT(iNewTop);
7115 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7116 iemFpuRotateStackPush(pFpuCtx);
7117 }
7118 else
7119 {
7120 /* Exception pending - don't change TOP or the register stack. */
7121 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7122 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7123 }
7124}
7125
7126
7127DECL_NO_INLINE(IEM_STATIC, void)
7128iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7129{
7130 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7131 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7132 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7133
7134 if (pFpuCtx->FCW & X86_FCW_IM)
7135 {
7136        /* Masked underflow - Push QNaN. */
7137 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7138 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7139 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7140 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7141 pFpuCtx->FTW |= RT_BIT(iNewTop);
7142 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7143 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7144 iemFpuRotateStackPush(pFpuCtx);
7145 }
7146 else
7147 {
7148 /* Exception pending - don't change TOP or the register stack. */
7149 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7150 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7151 }
7152}
7153
7154
7155/**
7156 * Worker routine for raising an FPU stack overflow exception on a push.
7157 *
7158 * @param pFpuCtx The FPU context.
7159 */
7160IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7161{
7162 if (pFpuCtx->FCW & X86_FCW_IM)
7163 {
7164 /* Masked overflow. */
7165 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7166 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7167 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7168 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7169 pFpuCtx->FTW |= RT_BIT(iNewTop);
7170 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7171 iemFpuRotateStackPush(pFpuCtx);
7172 }
7173 else
7174 {
7175 /* Exception pending - don't change TOP or the register stack. */
7176 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7177 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7178 }
7179}
7180
7181
7182/**
7183 * Raises a FPU stack overflow exception on a push.
7184 *
7185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7186 */
7187DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7188{
7189 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7190 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7191 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7192 iemFpuStackPushOverflowOnly(pFpuCtx);
7193}
7194
7195
7196/**
7197 * Raises a FPU stack overflow exception on a push with a memory operand.
7198 *
7199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7200 * @param iEffSeg The effective memory operand selector register.
7201 * @param GCPtrEff The effective memory operand offset.
7202 */
7203DECL_NO_INLINE(IEM_STATIC, void)
7204iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7205{
7206 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7207 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7208 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7209 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7210 iemFpuStackPushOverflowOnly(pFpuCtx);
7211}
7212
7213
7214IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7215{
7216 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7217 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7218 if (pFpuCtx->FTW & RT_BIT(iReg))
7219 return VINF_SUCCESS;
7220 return VERR_NOT_FOUND;
7221}
7222
7223
7224IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7225{
7226 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7227 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7228 if (pFpuCtx->FTW & RT_BIT(iReg))
7229 {
7230 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7231 return VINF_SUCCESS;
7232 }
7233 return VERR_NOT_FOUND;
7234}
7235
7236
7237IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7238 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7239{
7240 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7241 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7242 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7243 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7244 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7245 {
7246 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7247 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7248 return VINF_SUCCESS;
7249 }
7250 return VERR_NOT_FOUND;
7251}
7252
7253
7254IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7255{
7256 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7257 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7258 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7259 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7260 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7261 {
7262 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7263 return VINF_SUCCESS;
7264 }
7265 return VERR_NOT_FOUND;
7266}
7267
7268
7269/**
7270 * Updates the FPU exception status after FCW is changed.
7271 *
7272 * @param pFpuCtx The FPU context.
7273 */
7274IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7275{
7276 uint16_t u16Fsw = pFpuCtx->FSW;
7277 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7278 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7279 else
7280 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7281 pFpuCtx->FSW = u16Fsw;
7282}
7283
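
/*
 * Illustrative note (not part of the original source): the recalculation above
 * implements "ES (and its B mirror) is set iff some exception flagged in FSW is
 * not masked in FCW".  Reduced to the six numeric exception flags (IE..PE in
 * FSW bits 0..5 vs IM..PM in FCW bits 0..5), with a made-up function name:
 *
 * @code
 *  #include <stdint.h>
 *
 *  static int ExampleHasUnmaskedXcpt(uint16_t fFsw, uint16_t fFcw)
 *  {
 *      return ((fFsw & 0x3f) & ~(fFcw & 0x3f)) != 0;
 *  }
 * @endcode
 */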
7284
7285/**
7286 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7287 *
7288 * @returns The full FTW.
7289 * @param pFpuCtx The FPU context.
7290 */
7291IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7292{
7293 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7294 uint16_t u16Ftw = 0;
7295 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7296 for (unsigned iSt = 0; iSt < 8; iSt++)
7297 {
7298 unsigned const iReg = (iSt + iTop) & 7;
7299 if (!(u8Ftw & RT_BIT(iReg)))
7300 u16Ftw |= 3 << (iReg * 2); /* empty */
7301 else
7302 {
7303 uint16_t uTag;
7304 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7305 if (pr80Reg->s.uExponent == 0x7fff)
7306 uTag = 2; /* Exponent is all 1's => Special. */
7307 else if (pr80Reg->s.uExponent == 0x0000)
7308 {
7309 if (pr80Reg->s.u64Mantissa == 0x0000)
7310 uTag = 1; /* All bits are zero => Zero. */
7311 else
7312 uTag = 2; /* Must be special. */
7313 }
7314 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7315 uTag = 0; /* Valid. */
7316 else
7317 uTag = 2; /* Must be special. */
7318
7319            u16Ftw |= uTag << (iReg * 2);
7320 }
7321 }
7322
7323 return u16Ftw;
7324}
7325
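
/*
 * Illustrative sketch (not part of the original source): the 2-bit tags computed
 * above follow the architectural encoding 0=valid, 1=zero, 2=special (NaN,
 * infinity, denormal, unnormal), 3=empty.  The same classification as a
 * standalone function over the exponent and mantissa (name made up):
 *
 * @code
 *  #include <stdint.h>
 *
 *  static unsigned ExampleClassifyFtwTag(int fEmpty, uint16_t uExponent, uint64_t u64Mantissa)
 *  {
 *      if (fEmpty)
 *          return 3;                                   // empty
 *      if (uExponent == 0x7fff)
 *          return 2;                                   // special: NaN or infinity
 *      if (uExponent == 0)
 *          return u64Mantissa == 0 ? 1 : 2;            // zero, or special (denormal)
 *      return (u64Mantissa & UINT64_C(0x8000000000000000)) ? 0 : 2;  // valid if the J bit is set, else unnormal
 *  }
 * @endcode
 */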
7326
7327/**
7328 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7329 *
7330 * @returns The compressed FTW.
7331 * @param u16FullFtw The full FTW to convert.
7332 */
7333IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7334{
7335 uint8_t u8Ftw = 0;
7336 for (unsigned i = 0; i < 8; i++)
7337 {
7338 if ((u16FullFtw & 3) != 3 /*empty*/)
7339 u8Ftw |= RT_BIT(i);
7340 u16FullFtw >>= 2;
7341 }
7342
7343 return u8Ftw;
7344}
7345
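
/*
 * Illustrative usage (not part of the original source): a full FTW of 0xfff4,
 * i.e. registers 7..2 empty, register 1 zero, register 0 valid, compresses to
 * one bit per occupied register:
 *
 * @code
 *  Assert(iemFpuCompressFtw(UINT16_C(0xfff4)) == 0x03);
 * @endcode
 */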
7346/** @} */
7347
7348
7349/** @name Memory access.
7350 *
7351 * @{
7352 */
7353
7354
7355/**
7356 * Updates the IEMCPU::cbWritten counter if applicable.
7357 *
7358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7359 * @param fAccess The access being accounted for.
7360 * @param cbMem The access size.
7361 */
7362DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7363{
7364 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7365 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7366 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7367}
7368
7369
7370/**
7371 * Checks if the given segment can be written to, raising the appropriate
7372 * exception if not.
7373 *
7374 * @returns VBox strict status code.
7375 *
7376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7377 * @param pHid Pointer to the hidden register.
7378 * @param iSegReg The register number.
7379 * @param pu64BaseAddr Where to return the base address to use for the
7380 * segment. (In 64-bit code it may differ from the
7381 * base in the hidden segment.)
7382 */
7383IEM_STATIC VBOXSTRICTRC
7384iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7385{
7386 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7387 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7388 else
7389 {
7390 if (!pHid->Attr.n.u1Present)
7391 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7392
7393 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7394 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7395 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7396 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7397 *pu64BaseAddr = pHid->u64Base;
7398 }
7399 return VINF_SUCCESS;
7400}
7401
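
/*
 * Illustrative sketch (not part of the original source): outside 64-bit mode the
 * check above amounts to requiring a present, writable data segment.  As a
 * standalone predicate over the relevant attribute bits (name made up):
 *
 * @code
 *  #include <stdbool.h>
 *
 *  static bool ExampleSegWritable(bool fPresent, bool fCodeSeg, bool fWritable)
 *  {
 *      return fPresent && !fCodeSeg && fWritable;  // code segments are never valid write targets
 *  }
 * @endcode
 */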
7402
7403/**
7404 * Checks if the given segment can be read from, raising the appropriate
7405 * exception if not.
7406 *
7407 * @returns VBox strict status code.
7408 *
7409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7410 * @param pHid Pointer to the hidden register.
7411 * @param iSegReg The register number.
7412 * @param pu64BaseAddr Where to return the base address to use for the
7413 * segment. (In 64-bit code it may differ from the
7414 * base in the hidden segment.)
7415 */
7416IEM_STATIC VBOXSTRICTRC
7417iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7418{
7419 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7420 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7421 else
7422 {
7423 if (!pHid->Attr.n.u1Present)
7424 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7425
7426 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7427 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7428 *pu64BaseAddr = pHid->u64Base;
7429 }
7430 return VINF_SUCCESS;
7431}
7432
7433
7434/**
7435 * Applies the segment limit, base and attributes.
7436 *
7437 * This may raise a \#GP or \#SS.
7438 *
7439 * @returns VBox strict status code.
7440 *
7441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7442 * @param fAccess The kind of access which is being performed.
7443 * @param iSegReg The index of the segment register to apply.
7444 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7445 * TSS, ++).
7446 * @param cbMem The access size.
7447 * @param pGCPtrMem Pointer to the guest memory address to apply
7448 * segmentation to. Input and output parameter.
7449 */
7450IEM_STATIC VBOXSTRICTRC
7451iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7452{
7453 if (iSegReg == UINT8_MAX)
7454 return VINF_SUCCESS;
7455
7456 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7457 switch (pVCpu->iem.s.enmCpuMode)
7458 {
7459 case IEMMODE_16BIT:
7460 case IEMMODE_32BIT:
7461 {
7462 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7463 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7464
7465 if ( pSel->Attr.n.u1Present
7466 && !pSel->Attr.n.u1Unusable)
7467 {
7468 Assert(pSel->Attr.n.u1DescType);
7469 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7470 {
7471 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7472 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7473 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7474
7475 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7476 {
7477 /** @todo CPL check. */
7478 }
7479
7480 /*
7481 * There are two kinds of data selectors, normal and expand down.
7482 */
7483 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7484 {
7485 if ( GCPtrFirst32 > pSel->u32Limit
7486 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7487 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7488 }
7489 else
7490 {
7491 /*
7492 * The upper boundary is defined by the B bit, not the G bit!
7493 */
7494 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7495 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7496 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7497 }
7498 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7499 }
7500 else
7501 {
7502
7503 /*
7504                 * A code selector can usually be used to read through it; writing is
7505 * only permitted in real and V8086 mode.
7506 */
7507 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7508 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7509 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7510 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7511 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7512
7513 if ( GCPtrFirst32 > pSel->u32Limit
7514 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7515 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7516
7517 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7518 {
7519 /** @todo CPL check. */
7520 }
7521
7522 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7523 }
7524 }
7525 else
7526 return iemRaiseGeneralProtectionFault0(pVCpu);
7527 return VINF_SUCCESS;
7528 }
7529
7530 case IEMMODE_64BIT:
7531 {
7532 RTGCPTR GCPtrMem = *pGCPtrMem;
7533 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7534 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7535
7536 Assert(cbMem >= 1);
7537 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7538 return VINF_SUCCESS;
7539 return iemRaiseGeneralProtectionFault0(pVCpu);
7540 }
7541
7542 default:
7543 AssertFailedReturn(VERR_IEM_IPE_7);
7544 }
7545}
7546
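
/*
 * Illustrative sketch (not part of the original source): the expand-down branch
 * above accepts only offsets strictly above the limit and no higher than the
 * boundary selected by the B bit.  Mirroring that check as a standalone
 * predicate (name made up):
 *
 * @code
 *  #include <stdint.h>
 *
 *  static int ExampleExpandDownOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, int fBig)
 *  {
 *      uint32_t const uUpper = fBig ? UINT32_MAX : UINT32_C(0xffff);
 *      return !(offFirst < uLimit + UINT32_C(1) || offLast > uUpper);
 *  }
 * @endcode
 *
 * Here offLast is offFirst + cbMem - 1, just like GCPtrLast32 above.
 */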
7547
7548/**
7549 * Translates a virtual address to a physical address and checks if we
7550 * can access the page as specified.
7551 *
7552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7553 * @param GCPtrMem The virtual address.
7554 * @param fAccess The intended access.
7555 * @param pGCPhysMem Where to return the physical address.
7556 */
7557IEM_STATIC VBOXSTRICTRC
7558iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7559{
7560 /** @todo Need a different PGM interface here. We're currently using
7561     *        generic / REM interfaces. This won't cut it for R0 & RC. */
7562 RTGCPHYS GCPhys;
7563 uint64_t fFlags;
7564 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7565 if (RT_FAILURE(rc))
7566 {
7567 /** @todo Check unassigned memory in unpaged mode. */
7568 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7569 *pGCPhysMem = NIL_RTGCPHYS;
7570 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7571 }
7572
7573    /* If the page is writable, user accessible and does not have the no-exec
7574       bit set, all access is allowed.  Otherwise we'll have to check more carefully... */
7575 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7576 {
7577 /* Write to read only memory? */
7578 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7579 && !(fFlags & X86_PTE_RW)
7580 && ( pVCpu->iem.s.uCpl != 0
7581 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7582 {
7583 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7584 *pGCPhysMem = NIL_RTGCPHYS;
7585 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7586 }
7587
7588 /* Kernel memory accessed by userland? */
7589 if ( !(fFlags & X86_PTE_US)
7590 && pVCpu->iem.s.uCpl == 3
7591 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7592 {
7593 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7594 *pGCPhysMem = NIL_RTGCPHYS;
7595 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7596 }
7597
7598 /* Executing non-executable memory? */
7599 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7600 && (fFlags & X86_PTE_PAE_NX)
7601 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7602 {
7603 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7604 *pGCPhysMem = NIL_RTGCPHYS;
7605 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7606 VERR_ACCESS_DENIED);
7607 }
7608 }
7609
7610 /*
7611 * Set the dirty / access flags.
7612     * ASSUMES this is set when the address is translated rather than on commit...
7613 */
7614 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7615 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7616 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7617 {
7618 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7619 AssertRC(rc2);
7620 }
7621
7622 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7623 *pGCPhysMem = GCPhys;
7624 return VINF_SUCCESS;
7625}
7626
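
/*
 * Illustrative sketch (not part of the original source): the slow path above
 * answers three questions about the PTE flags.  Simplified predicate that
 * ignores the CR0.WP, CPL and IEM_ACCESS_WHAT_SYS refinements handled by the
 * real code (name made up):
 *
 * @code
 *  #include <stdbool.h>
 *
 *  static bool ExamplePageAccessOk(bool fPteWritable, bool fPteUser, bool fPteNoExec,
 *                                  bool fWrite, bool fUserAccess, bool fExec)
 *  {
 *      if (fWrite && !fPteWritable)    return false;   // write to a read-only page
 *      if (fUserAccess && !fPteUser)   return false;   // ring-3 touching a supervisor page
 *      if (fExec && fPteNoExec)        return false;   // instruction fetch from a no-exec page
 *      return true;
 *  }
 * @endcode
 */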
7627
7628
7629/**
7630 * Maps a physical page.
7631 *
7632 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7634 * @param GCPhysMem The physical address.
7635 * @param fAccess The intended access.
7636 * @param ppvMem Where to return the mapping address.
7637 * @param pLock The PGM lock.
7638 */
7639IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7640{
7641#ifdef IEM_VERIFICATION_MODE_FULL
7642 /* Force the alternative path so we can ignore writes. */
7643 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7644 {
7645 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7646 {
7647 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7648 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7649 if (RT_FAILURE(rc2))
7650 pVCpu->iem.s.fProblematicMemory = true;
7651 }
7652 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7653 }
7654#endif
7655#ifdef IEM_LOG_MEMORY_WRITES
7656 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7657 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7658#endif
7659#ifdef IEM_VERIFICATION_MODE_MINIMAL
7660 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7661#endif
7662
7663    /** @todo This API may require some improvement later.  A private deal with PGM
7664     *        regarding locking and unlocking needs to be struck.  A couple of TLBs
7665 * living in PGM, but with publicly accessible inlined access methods
7666 * could perhaps be an even better solution. */
7667 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7668 GCPhysMem,
7669 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7670 pVCpu->iem.s.fBypassHandlers,
7671 ppvMem,
7672 pLock);
7673 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7674 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7675
7676#ifdef IEM_VERIFICATION_MODE_FULL
7677 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7678 pVCpu->iem.s.fProblematicMemory = true;
7679#endif
7680 return rc;
7681}
7682
7683
7684/**
7685 * Unmaps a page previously mapped by iemMemPageMap.
7686 *
7687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7688 * @param GCPhysMem The physical address.
7689 * @param fAccess The intended access.
7690 * @param pvMem What iemMemPageMap returned.
7691 * @param pLock The PGM lock.
7692 */
7693DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7694{
7695 NOREF(pVCpu);
7696 NOREF(GCPhysMem);
7697 NOREF(fAccess);
7698 NOREF(pvMem);
7699 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7700}
7701
7702
7703/**
7704 * Looks up a memory mapping entry.
7705 *
7706 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7708 * @param pvMem The memory address.
7709 * @param   fAccess             The access flags to match (type and what).
7710 */
7711DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7712{
7713 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7714 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7715 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7716 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7717 return 0;
7718 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7719 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7720 return 1;
7721 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7722 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7723 return 2;
7724 return VERR_NOT_FOUND;
7725}
7726
7727
7728/**
7729 * Finds a free memmap entry when using iNextMapping doesn't work.
7730 *
7731 * @returns Memory mapping index, 1024 on failure.
7732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7733 */
7734IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7735{
7736 /*
7737 * The easy case.
7738 */
7739 if (pVCpu->iem.s.cActiveMappings == 0)
7740 {
7741 pVCpu->iem.s.iNextMapping = 1;
7742 return 0;
7743 }
7744
7745 /* There should be enough mappings for all instructions. */
7746 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7747
7748 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7749 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7750 return i;
7751
7752 AssertFailedReturn(1024);
7753}
7754
7755
7756/**
7757 * Commits a bounce buffer that needs writing back and unmaps it.
7758 *
7759 * @returns Strict VBox status code.
7760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7761 * @param iMemMap The index of the buffer to commit.
7762 * @param   fPostponeFail       Whether we can postpone write failures to ring-3.
7763 * Always false in ring-3, obviously.
7764 */
7765IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7766{
7767 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7768 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7769#ifdef IN_RING3
7770 Assert(!fPostponeFail);
7771#endif
7772
7773 /*
7774 * Do the writing.
7775 */
7776#ifndef IEM_VERIFICATION_MODE_MINIMAL
7777 PVM pVM = pVCpu->CTX_SUFF(pVM);
7778 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7779 && !IEM_VERIFICATION_ENABLED(pVCpu))
7780 {
7781 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7782 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7783 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7784 if (!pVCpu->iem.s.fBypassHandlers)
7785 {
7786 /*
7787 * Carefully and efficiently dealing with access handler return
7788             * codes makes this a little bloated.
7789 */
7790 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7791 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7792 pbBuf,
7793 cbFirst,
7794 PGMACCESSORIGIN_IEM);
7795 if (rcStrict == VINF_SUCCESS)
7796 {
7797 if (cbSecond)
7798 {
7799 rcStrict = PGMPhysWrite(pVM,
7800 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7801 pbBuf + cbFirst,
7802 cbSecond,
7803 PGMACCESSORIGIN_IEM);
7804 if (rcStrict == VINF_SUCCESS)
7805 { /* nothing */ }
7806 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7807 {
7808 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7811 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7812 }
7813# ifndef IN_RING3
7814 else if (fPostponeFail)
7815 {
7816 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7817 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7819 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7820 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7821 return iemSetPassUpStatus(pVCpu, rcStrict);
7822 }
7823# endif
7824 else
7825 {
7826 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7827 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7828 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7829 return rcStrict;
7830 }
7831 }
7832 }
7833 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7834 {
7835 if (!cbSecond)
7836 {
7837 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7838 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7839 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7840 }
7841 else
7842 {
7843 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7844 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7845 pbBuf + cbFirst,
7846 cbSecond,
7847 PGMACCESSORIGIN_IEM);
7848 if (rcStrict2 == VINF_SUCCESS)
7849 {
7850 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7851 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7852 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7853 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7854 }
7855 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7856 {
7857 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7858 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7859 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7860 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7861 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7862 }
7863# ifndef IN_RING3
7864 else if (fPostponeFail)
7865 {
7866 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7868 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7869 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7870 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7871 return iemSetPassUpStatus(pVCpu, rcStrict);
7872 }
7873# endif
7874 else
7875 {
7876 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7877 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7879 return rcStrict2;
7880 }
7881 }
7882 }
7883# ifndef IN_RING3
7884 else if (fPostponeFail)
7885 {
7886 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7889 if (!cbSecond)
7890 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7891 else
7892 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7893 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7894 return iemSetPassUpStatus(pVCpu, rcStrict);
7895 }
7896# endif
7897 else
7898 {
7899 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7900 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7901 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7902 return rcStrict;
7903 }
7904 }
7905 else
7906 {
7907 /*
7908 * No access handlers, much simpler.
7909 */
7910 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7911 if (RT_SUCCESS(rc))
7912 {
7913 if (cbSecond)
7914 {
7915 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7916 if (RT_SUCCESS(rc))
7917 { /* likely */ }
7918 else
7919 {
7920 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7922 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7923 return rc;
7924 }
7925 }
7926 }
7927 else
7928 {
7929 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7930 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7932 return rc;
7933 }
7934 }
7935 }
7936#endif
7937
7938#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7939 /*
7940 * Record the write(s).
7941 */
7942 if (!pVCpu->iem.s.fNoRem)
7943 {
7944 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7945 if (pEvtRec)
7946 {
7947 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7948 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7949 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7950 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7951 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7952 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7953 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7954 }
7955 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7956 {
7957 pEvtRec = iemVerifyAllocRecord(pVCpu);
7958 if (pEvtRec)
7959 {
7960 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7961 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7962 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7963 memcpy(pEvtRec->u.RamWrite.ab,
7964 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7965 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7966 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7967 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7968 }
7969 }
7970 }
7971#endif
7972#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
7973 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7974 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
7975 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7976 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7977 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
7978 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
7979
7980 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7981 g_cbIemWrote = cbWrote;
7982 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
7983#endif
7984
7985 /*
7986 * Free the mapping entry.
7987 */
7988 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7989 Assert(pVCpu->iem.s.cActiveMappings != 0);
7990 pVCpu->iem.s.cActiveMappings--;
7991 return VINF_SUCCESS;
7992}
7993
7994
7995/**
7996 * iemMemMap worker that deals with a request crossing pages.
7997 */
7998IEM_STATIC VBOXSTRICTRC
7999iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8000{
8001 /*
8002 * Do the address translations.
8003 */
8004 RTGCPHYS GCPhysFirst;
8005 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8006 if (rcStrict != VINF_SUCCESS)
8007 return rcStrict;
8008
8009 RTGCPHYS GCPhysSecond;
8010 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8011 fAccess, &GCPhysSecond);
8012 if (rcStrict != VINF_SUCCESS)
8013 return rcStrict;
8014 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8015
8016 PVM pVM = pVCpu->CTX_SUFF(pVM);
8017#ifdef IEM_VERIFICATION_MODE_FULL
8018 /*
8019 * Detect problematic memory when verifying so we can select
8020 * the right execution engine. (TLB: Redo this.)
8021 */
8022 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8023 {
8024 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8025 if (RT_SUCCESS(rc2))
8026 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8027 if (RT_FAILURE(rc2))
8028 pVCpu->iem.s.fProblematicMemory = true;
8029 }
8030#endif
8031
8032
8033 /*
8034 * Read in the current memory content if it's a read, execute or partial
8035 * write access.
8036 */
8037 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8038 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8039 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8040
8041 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8042 {
8043 if (!pVCpu->iem.s.fBypassHandlers)
8044 {
8045 /*
8046 * Must carefully deal with access handler status codes here,
8047 * makes the code a bit bloated.
8048 */
8049 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8050 if (rcStrict == VINF_SUCCESS)
8051 {
8052 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8053 if (rcStrict == VINF_SUCCESS)
8054 { /*likely */ }
8055 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8056 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8057 else
8058 {
8059                   Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8060                        GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8061 return rcStrict;
8062 }
8063 }
8064 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8065 {
8066 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8067 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8068 {
8069 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8070 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8071 }
8072 else
8073 {
8074                      Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8075                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8076 return rcStrict2;
8077 }
8078 }
8079 else
8080 {
8081                   Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8082 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8083 return rcStrict;
8084 }
8085 }
8086 else
8087 {
8088 /*
8089              * No informational status codes here, much more straightforward.
8090 */
8091 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8092 if (RT_SUCCESS(rc))
8093 {
8094 Assert(rc == VINF_SUCCESS);
8095 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8096 if (RT_SUCCESS(rc))
8097 Assert(rc == VINF_SUCCESS);
8098 else
8099 {
8100                   Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8101 return rc;
8102 }
8103 }
8104 else
8105 {
8106                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8107 return rc;
8108 }
8109 }
8110
8111#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8112 if ( !pVCpu->iem.s.fNoRem
8113 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8114 {
8115 /*
8116 * Record the reads.
8117 */
8118 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8119 if (pEvtRec)
8120 {
8121 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8122 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8123 pEvtRec->u.RamRead.cb = cbFirstPage;
8124 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8125 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8126 }
8127 pEvtRec = iemVerifyAllocRecord(pVCpu);
8128 if (pEvtRec)
8129 {
8130 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8131 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8132 pEvtRec->u.RamRead.cb = cbSecondPage;
8133 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8134 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8135 }
8136 }
8137#endif
8138 }
8139#ifdef VBOX_STRICT
8140 else
8141 memset(pbBuf, 0xcc, cbMem);
8142 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8143 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8144#endif
8145
8146 /*
8147 * Commit the bounce buffer entry.
8148 */
8149 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8150 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8151 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8152 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8153 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8154 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8155 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8156 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8157 pVCpu->iem.s.cActiveMappings++;
8158
8159 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8160 *ppvMem = pbBuf;
8161 return VINF_SUCCESS;
8162}
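
/*
 * Illustrative sketch (not compiled): how the page-crossing split above works
 * out for a hypothetical 4-byte access whose first byte lands at offset 0xffe
 * of a page.  The names mirror the locals above; the concrete address and
 * size are assumptions for the example only.
 */
#if 0
    RTGCPHYS const GCPhysFirst  = UINT64_C(0x00001ffe);                          /* hypothetical address */
    size_t   const cbMem        = 4;                                             /* hypothetical size */
    uint32_t const cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);  /* 0x1000 - 0xffe = 2 bytes */
    uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);               /* remaining 2 bytes, next page */
    /* The bounce buffer receives cbFirstPage bytes from GCPhysFirst followed by
       cbSecondPage bytes from GCPhysSecond, and the commit path writes them
       back to the two physical pages in the same two pieces. */
#endif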
8163
8164
8165/**
8166 * iemMemMap worker that deals with iemMemPageMap failures.
8167 */
8168IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8169 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8170{
8171 /*
8172 * Filter out conditions we can handle and the ones which shouldn't happen.
8173 */
8174 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8175 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8176 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8177 {
8178 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8179 return rcMap;
8180 }
8181 pVCpu->iem.s.cPotentialExits++;
8182
8183 /*
8184 * Read in the current memory content if it's a read, execute or partial
8185 * write access.
8186 */
8187 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8188 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8189 {
8190 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8191 memset(pbBuf, 0xff, cbMem);
8192 else
8193 {
8194 int rc;
8195 if (!pVCpu->iem.s.fBypassHandlers)
8196 {
8197 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8198 if (rcStrict == VINF_SUCCESS)
8199 { /* nothing */ }
8200 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8201 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8202 else
8203 {
8204 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8205 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8206 return rcStrict;
8207 }
8208 }
8209 else
8210 {
8211 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8212 if (RT_SUCCESS(rc))
8213 { /* likely */ }
8214 else
8215 {
8216                  Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8217                       GCPhysFirst, rc));
8218 return rc;
8219 }
8220 }
8221 }
8222
8223#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8224 if ( !pVCpu->iem.s.fNoRem
8225 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8226 {
8227 /*
8228 * Record the read.
8229 */
8230 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8231 if (pEvtRec)
8232 {
8233 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8234 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8235 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8236 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8237 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8238 }
8239 }
8240#endif
8241 }
8242#ifdef VBOX_STRICT
8243   else
8244       memset(pbBuf, 0xcc, cbMem);
8245   if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8246       memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8247#endif
8250
8251 /*
8252 * Commit the bounce buffer entry.
8253 */
8254 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8255 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8256 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8257 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8258 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8259 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8260 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8261 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8262 pVCpu->iem.s.cActiveMappings++;
8263
8264 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8265 *ppvMem = pbBuf;
8266 return VINF_SUCCESS;
8267}
8268
8269
8270
8271/**
8272 * Maps the specified guest memory for the given kind of access.
8273 *
8274 * This may be using bounce buffering of the memory if it's crossing a page
8275 * boundary or if there is an access handler installed for any of it. Because
8276 * of lock prefix guarantees, we're in for some extra clutter when this
8277 * happens.
8278 *
8279 * This may raise a \#GP, \#SS, \#PF or \#AC.
8280 *
8281 * @returns VBox strict status code.
8282 *
8283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8284 * @param ppvMem Where to return the pointer to the mapped
8285 * memory.
8286 * @param cbMem The number of bytes to map. This is usually 1,
8287 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8288 * string operations it can be up to a page.
8289 * @param iSegReg The index of the segment register to use for
8290 * this access. The base and limits are checked.
8291 * Use UINT8_MAX to indicate that no segmentation
8292 * is required (for IDT, GDT and LDT accesses).
8293 * @param GCPtrMem The address of the guest memory.
8294 * @param fAccess How the memory is being accessed. The
8295 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8296 * how to map the memory, while the
8297 * IEM_ACCESS_WHAT_XXX bit is used when raising
8298 * exceptions.
8299 */
8300IEM_STATIC VBOXSTRICTRC
8301iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8302{
8303 /*
8304 * Check the input and figure out which mapping entry to use.
8305 */
8306 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8307 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8308 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8309
8310 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8311 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8312 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8313 {
8314 iMemMap = iemMemMapFindFree(pVCpu);
8315 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8316 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8317 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8318 pVCpu->iem.s.aMemMappings[2].fAccess),
8319 VERR_IEM_IPE_9);
8320 }
8321
8322 /*
8323 * Map the memory, checking that we can actually access it. If something
8324 * slightly complicated happens, fall back on bounce buffering.
8325 */
8326 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8327 if (rcStrict != VINF_SUCCESS)
8328 return rcStrict;
8329
8330 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8331 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8332
8333 RTGCPHYS GCPhysFirst;
8334 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8335 if (rcStrict != VINF_SUCCESS)
8336 return rcStrict;
8337
8338 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8339 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8340 if (fAccess & IEM_ACCESS_TYPE_READ)
8341 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8342
8343 void *pvMem;
8344 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8345 if (rcStrict != VINF_SUCCESS)
8346 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8347
8348 /*
8349 * Fill in the mapping table entry.
8350 */
8351 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8352 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8353 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8354 pVCpu->iem.s.cActiveMappings++;
8355
8356 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8357 *ppvMem = pvMem;
8358 return VINF_SUCCESS;
8359}
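
/*
 * Minimal usage sketch (not compiled): the map/modify/commit pattern that the
 * data access helpers further down are built on, here for a hypothetical dword
 * write through DS.  GCPtrMem and u32Value are assumed to be supplied by the
 * caller; the segment choice is an assumption for the example.
 */
#if 0
    uint32_t    *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
                                      X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;    /* writes either directly to guest RAM or into the bounce buffer */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
#endif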
8360
8361
8362/**
8363 * Commits the guest memory if bounce buffered and unmaps it.
8364 *
8365 * @returns Strict VBox status code.
8366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8367 * @param pvMem The mapping.
8368 * @param fAccess The kind of access.
8369 */
8370IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8371{
8372 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8373 AssertReturn(iMemMap >= 0, iMemMap);
8374
8375 /* If it's bounce buffered, we may need to write back the buffer. */
8376 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8377 {
8378 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8379 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8380 }
8381 /* Otherwise unlock it. */
8382 else
8383 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8384
8385 /* Free the entry. */
8386 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8387 Assert(pVCpu->iem.s.cActiveMappings != 0);
8388 pVCpu->iem.s.cActiveMappings--;
8389 return VINF_SUCCESS;
8390}
8391
8392#ifdef IEM_WITH_SETJMP
8393
8394/**
8395 * Maps the specified guest memory for the given kind of access, longjmp on
8396 * error.
8397 *
8398 * This may be using bounce buffering of the memory if it's crossing a page
8399 * boundary or if there is an access handler installed for any of it. Because
8400 * of lock prefix guarantees, we're in for some extra clutter when this
8401 * happens.
8402 *
8403 * This may raise a \#GP, \#SS, \#PF or \#AC.
8404 *
8405 * @returns Pointer to the mapped memory.
8406 *
8407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8408 * @param cbMem The number of bytes to map. This is usually 1,
8409 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8410 * string operations it can be up to a page.
8411 * @param iSegReg The index of the segment register to use for
8412 * this access. The base and limits are checked.
8413 * Use UINT8_MAX to indicate that no segmentation
8414 * is required (for IDT, GDT and LDT accesses).
8415 * @param GCPtrMem The address of the guest memory.
8416 * @param fAccess How the memory is being accessed. The
8417 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8418 * how to map the memory, while the
8419 * IEM_ACCESS_WHAT_XXX bit is used when raising
8420 * exceptions.
8421 */
8422IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8423{
8424 /*
8425 * Check the input and figure out which mapping entry to use.
8426 */
8427 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8428 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8429 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8430
8431 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8432 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8433 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8434 {
8435 iMemMap = iemMemMapFindFree(pVCpu);
8436 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8437 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8438 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8439 pVCpu->iem.s.aMemMappings[2].fAccess),
8440 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8441 }
8442
8443 /*
8444 * Map the memory, checking that we can actually access it. If something
8445 * slightly complicated happens, fall back on bounce buffering.
8446 */
8447 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8448 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8449 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8450
8451 /* Crossing a page boundary? */
8452 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8453 { /* No (likely). */ }
8454 else
8455 {
8456 void *pvMem;
8457 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8458 if (rcStrict == VINF_SUCCESS)
8459 return pvMem;
8460 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8461 }
8462
8463 RTGCPHYS GCPhysFirst;
8464 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8465 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8466 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8467
8468 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8469 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8470 if (fAccess & IEM_ACCESS_TYPE_READ)
8471 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8472
8473 void *pvMem;
8474 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8475 if (rcStrict == VINF_SUCCESS)
8476 { /* likely */ }
8477 else
8478 {
8479 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8480 if (rcStrict == VINF_SUCCESS)
8481 return pvMem;
8482 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8483 }
8484
8485 /*
8486 * Fill in the mapping table entry.
8487 */
8488 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8489 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8490 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8491 pVCpu->iem.s.cActiveMappings++;
8492
8493 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8494 return pvMem;
8495}
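
/*
 * Minimal usage sketch (not compiled): with the longjmp variants there is no
 * status code plumbing in the caller; any failure longjmps back to the setjmp
 * frame referenced by pVCpu->iem.s.pJmpBuf.  This mirrors the fetch helpers
 * below; iSegReg and GCPtrMem are assumed to be supplied by the caller.
 */
#if 0
    uint16_t const *pu16Src  = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    uint16_t const  u16Value = *pu16Src;                        /* consume the fetched value */
    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
#endif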
8496
8497
8498/**
8499 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8500 *
8501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8502 * @param pvMem The mapping.
8503 * @param fAccess The kind of access.
8504 */
8505IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8506{
8507 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8508 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8509
8510 /* If it's bounce buffered, we may need to write back the buffer. */
8511 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8512 {
8513 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8514 {
8515 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8516 if (rcStrict == VINF_SUCCESS)
8517 return;
8518 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8519 }
8520 }
8521 /* Otherwise unlock it. */
8522 else
8523 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8524
8525 /* Free the entry. */
8526 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8527 Assert(pVCpu->iem.s.cActiveMappings != 0);
8528 pVCpu->iem.s.cActiveMappings--;
8529}
8530
8531#endif
8532
8533#ifndef IN_RING3
8534/**
8535 * Commits the guest memory if bounce buffered and unmaps it; if committing any
8536 * bounce buffer part fails, the write is postponed to ring-3 (VMCPU_FF_IEM gets set).
8537 *
8538 * Allows the instruction to be completed and retired, while the IEM user will
8539 * return to ring-3 immediately afterwards and do the postponed writes there.
8540 *
8541 * @returns VBox status code (no strict statuses). Caller must check
8542 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8544 * @param pvMem The mapping.
8545 * @param fAccess The kind of access.
8546 */
8547IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8548{
8549 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8550 AssertReturn(iMemMap >= 0, iMemMap);
8551
8552 /* If it's bounce buffered, we may need to write back the buffer. */
8553 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8554 {
8555 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8556 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8557 }
8558 /* Otherwise unlock it. */
8559 else
8560 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8561
8562 /* Free the entry. */
8563 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8564 Assert(pVCpu->iem.s.cActiveMappings != 0);
8565 pVCpu->iem.s.cActiveMappings--;
8566 return VINF_SUCCESS;
8567}
8568#endif
8569
8570
8571/**
8572 * Rolls back mappings, releasing page locks and such.
8573 *
8574 * The caller shall only call this after checking cActiveMappings.
8575 *
8577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8578 */
8579IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8580{
8581 Assert(pVCpu->iem.s.cActiveMappings > 0);
8582
8583 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8584 while (iMemMap-- > 0)
8585 {
8586 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8587 if (fAccess != IEM_ACCESS_INVALID)
8588 {
8589 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8590 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8591 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8592 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8593 Assert(pVCpu->iem.s.cActiveMappings > 0);
8594 pVCpu->iem.s.cActiveMappings--;
8595 }
8596 }
8597}
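
/*
 * Usage sketch (not compiled): per the note above, a caller-side fragment is
 * assumed to look roughly like the following, rolling back only when the
 * instruction failed with mappings still active (rcStrict is hypothetical).
 */
#if 0
    if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);
#endif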
8598
8599
8600/**
8601 * Fetches a data byte.
8602 *
8603 * @returns Strict VBox status code.
8604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8605 * @param pu8Dst Where to return the byte.
8606 * @param iSegReg The index of the segment register to use for
8607 * this access. The base and limits are checked.
8608 * @param GCPtrMem The address of the guest memory.
8609 */
8610IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8611{
8612 /* The lazy approach for now... */
8613 uint8_t const *pu8Src;
8614 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8615 if (rc == VINF_SUCCESS)
8616 {
8617 *pu8Dst = *pu8Src;
8618 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8619 }
8620 return rc;
8621}
8622
8623
8624#ifdef IEM_WITH_SETJMP
8625/**
8626 * Fetches a data byte, longjmp on error.
8627 *
8628 * @returns The byte.
8629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8630 * @param iSegReg The index of the segment register to use for
8631 * this access. The base and limits are checked.
8632 * @param GCPtrMem The address of the guest memory.
8633 */
8634DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8635{
8636 /* The lazy approach for now... */
8637 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8638 uint8_t const bRet = *pu8Src;
8639 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8640 return bRet;
8641}
8642#endif /* IEM_WITH_SETJMP */
8643
8644
8645/**
8646 * Fetches a data word.
8647 *
8648 * @returns Strict VBox status code.
8649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8650 * @param pu16Dst Where to return the word.
8651 * @param iSegReg The index of the segment register to use for
8652 * this access. The base and limits are checked.
8653 * @param GCPtrMem The address of the guest memory.
8654 */
8655IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8656{
8657 /* The lazy approach for now... */
8658 uint16_t const *pu16Src;
8659 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8660 if (rc == VINF_SUCCESS)
8661 {
8662 *pu16Dst = *pu16Src;
8663 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8664 }
8665 return rc;
8666}
8667
8668
8669#ifdef IEM_WITH_SETJMP
8670/**
8671 * Fetches a data word, longjmp on error.
8672 *
8673 * @returns The word
8674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8675 * @param iSegReg The index of the segment register to use for
8676 * this access. The base and limits are checked.
8677 * @param GCPtrMem The address of the guest memory.
8678 */
8679DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8680{
8681 /* The lazy approach for now... */
8682 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8683 uint16_t const u16Ret = *pu16Src;
8684 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8685 return u16Ret;
8686}
8687#endif
8688
8689
8690/**
8691 * Fetches a data dword.
8692 *
8693 * @returns Strict VBox status code.
8694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8695 * @param pu32Dst Where to return the dword.
8696 * @param iSegReg The index of the segment register to use for
8697 * this access. The base and limits are checked.
8698 * @param GCPtrMem The address of the guest memory.
8699 */
8700IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8701{
8702 /* The lazy approach for now... */
8703 uint32_t const *pu32Src;
8704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8705 if (rc == VINF_SUCCESS)
8706 {
8707 *pu32Dst = *pu32Src;
8708 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8709 }
8710 return rc;
8711}
8712
8713
8714#ifdef IEM_WITH_SETJMP
8715
8716IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8717{
8718 Assert(cbMem >= 1);
8719 Assert(iSegReg < X86_SREG_COUNT);
8720
8721 /*
8722 * 64-bit mode is simpler.
8723 */
8724 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8725 {
8726 if (iSegReg >= X86_SREG_FS)
8727 {
8728 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8729 GCPtrMem += pSel->u64Base;
8730 }
8731
8732 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8733 return GCPtrMem;
8734 }
8735 /*
8736 * 16-bit and 32-bit segmentation.
8737 */
8738 else
8739 {
8740 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8741 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8742 == X86DESCATTR_P /* data, expand up */
8743 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8744 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8745 {
8746 /* expand up */
8747            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8748            if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
8749                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
8750 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8751 }
8752 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8753 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8754 {
8755 /* expand down */
8756 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8757 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8758 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8759 && GCPtrLast32 > (uint32_t)GCPtrMem))
8760 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8761 }
8762 else
8763 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8764 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8765 }
8766 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8767}
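
/*
 * Worked example (informational) of the expand-down limit check above, using a
 * hypothetical 32-bit (D=1) expand-down data segment with u32Limit = 0xfff:
 * valid offsets are 0x1000..0xffffffff.  A 4-byte read at offset 0x1000 gives
 * GCPtrLast32 = 0x1004, which is > u32Limit, <= UINT32_MAX and > GCPtrMem, so
 * the access is let through; a read at offset 0xfff fails the first test and
 * raises the selector bounds exception instead.
 */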
8768
8769
8770IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8771{
8772 Assert(cbMem >= 1);
8773 Assert(iSegReg < X86_SREG_COUNT);
8774
8775 /*
8776 * 64-bit mode is simpler.
8777 */
8778 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8779 {
8780 if (iSegReg >= X86_SREG_FS)
8781 {
8782 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8783 GCPtrMem += pSel->u64Base;
8784 }
8785
8786 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8787 return GCPtrMem;
8788 }
8789 /*
8790 * 16-bit and 32-bit segmentation.
8791 */
8792 else
8793 {
8794 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8795 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8796 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8797 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8798 {
8799 /* expand up */
8800            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8801            if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
8802                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
8803 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8804 }
8805       else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8806 {
8807 /* expand down */
8808 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8809 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8810 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8811 && GCPtrLast32 > (uint32_t)GCPtrMem))
8812 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8813 }
8814 else
8815 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8816 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8817 }
8818 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8819}
8820
8821
8822/**
8823 * Fetches a data dword, longjmp on error, fallback/safe version.
8824 *
8825 * @returns The dword
8826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8827 * @param iSegReg The index of the segment register to use for
8828 * this access. The base and limits are checked.
8829 * @param GCPtrMem The address of the guest memory.
8830 */
8831IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8832{
8833 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8834 uint32_t const u32Ret = *pu32Src;
8835 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8836 return u32Ret;
8837}
8838
8839
8840/**
8841 * Fetches a data dword, longjmp on error.
8842 *
8843 * @returns The dword
8844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8845 * @param iSegReg The index of the segment register to use for
8846 * this access. The base and limits are checked.
8847 * @param GCPtrMem The address of the guest memory.
8848 */
8849DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8850{
8851# ifdef IEM_WITH_DATA_TLB
8852 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8853 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8854 {
8855 /// @todo more later.
8856 }
8857
8858 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8859# else
8860 /* The lazy approach. */
8861 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8862 uint32_t const u32Ret = *pu32Src;
8863 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8864 return u32Ret;
8865# endif
8866}
8867#endif
8868
8869
8870#ifdef SOME_UNUSED_FUNCTION
8871/**
8872 * Fetches a data dword and sign extends it to a qword.
8873 *
8874 * @returns Strict VBox status code.
8875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8876 * @param pu64Dst Where to return the sign extended value.
8877 * @param iSegReg The index of the segment register to use for
8878 * this access. The base and limits are checked.
8879 * @param GCPtrMem The address of the guest memory.
8880 */
8881IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8882{
8883 /* The lazy approach for now... */
8884 int32_t const *pi32Src;
8885 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8886 if (rc == VINF_SUCCESS)
8887 {
8888 *pu64Dst = *pi32Src;
8889 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8890 }
8891#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8892 else
8893 *pu64Dst = 0;
8894#endif
8895 return rc;
8896}
8897#endif
8898
8899
8900/**
8901 * Fetches a data qword.
8902 *
8903 * @returns Strict VBox status code.
8904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8905 * @param pu64Dst Where to return the qword.
8906 * @param iSegReg The index of the segment register to use for
8907 * this access. The base and limits are checked.
8908 * @param GCPtrMem The address of the guest memory.
8909 */
8910IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8911{
8912 /* The lazy approach for now... */
8913 uint64_t const *pu64Src;
8914 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8915 if (rc == VINF_SUCCESS)
8916 {
8917 *pu64Dst = *pu64Src;
8918 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8919 }
8920 return rc;
8921}
8922
8923
8924#ifdef IEM_WITH_SETJMP
8925/**
8926 * Fetches a data qword, longjmp on error.
8927 *
8928 * @returns The qword.
8929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8930 * @param iSegReg The index of the segment register to use for
8931 * this access. The base and limits are checked.
8932 * @param GCPtrMem The address of the guest memory.
8933 */
8934DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8935{
8936 /* The lazy approach for now... */
8937 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8938 uint64_t const u64Ret = *pu64Src;
8939 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8940 return u64Ret;
8941}
8942#endif
8943
8944
8945/**
8946 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8947 *
8948 * @returns Strict VBox status code.
8949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8950 * @param pu64Dst Where to return the qword.
8951 * @param iSegReg The index of the segment register to use for
8952 * this access. The base and limits are checked.
8953 * @param GCPtrMem The address of the guest memory.
8954 */
8955IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8956{
8957 /* The lazy approach for now... */
8958 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8959 if (RT_UNLIKELY(GCPtrMem & 15))
8960 return iemRaiseGeneralProtectionFault0(pVCpu);
8961
8962 uint64_t const *pu64Src;
8963 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8964 if (rc == VINF_SUCCESS)
8965 {
8966 *pu64Dst = *pu64Src;
8967 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8968 }
8969 return rc;
8970}
8971
8972
8973#ifdef IEM_WITH_SETJMP
8974/**
8975 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
8976 *
8977 * @returns The qword.
8978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8979 * @param iSegReg The index of the segment register to use for
8980 * this access. The base and limits are checked.
8981 * @param GCPtrMem The address of the guest memory.
8982 */
8983DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8984{
8985 /* The lazy approach for now... */
8986 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8987 if (RT_LIKELY(!(GCPtrMem & 15)))
8988 {
8989 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8990 uint64_t const u64Ret = *pu64Src;
8991 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8992 return u64Ret;
8993 }
8994
8995 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
8996 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
8997}
8998#endif
8999
9000
9001/**
9002 * Fetches a data tword.
9003 *
9004 * @returns Strict VBox status code.
9005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9006 * @param pr80Dst Where to return the tword.
9007 * @param iSegReg The index of the segment register to use for
9008 * this access. The base and limits are checked.
9009 * @param GCPtrMem The address of the guest memory.
9010 */
9011IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9012{
9013 /* The lazy approach for now... */
9014 PCRTFLOAT80U pr80Src;
9015 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9016 if (rc == VINF_SUCCESS)
9017 {
9018 *pr80Dst = *pr80Src;
9019 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9020 }
9021 return rc;
9022}
9023
9024
9025#ifdef IEM_WITH_SETJMP
9026/**
9027 * Fetches a data tword, longjmp on error.
9028 *
9029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9030 * @param pr80Dst Where to return the tword.
9031 * @param iSegReg The index of the segment register to use for
9032 * this access. The base and limits are checked.
9033 * @param GCPtrMem The address of the guest memory.
9034 */
9035DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9036{
9037 /* The lazy approach for now... */
9038 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9039 *pr80Dst = *pr80Src;
9040 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9041}
9042#endif
9043
9044
9045/**
9046 * Fetches a data dqword (double qword), generally SSE related.
9047 *
9048 * @returns Strict VBox status code.
9049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9050 * @param pu128Dst Where to return the qword.
9051 * @param iSegReg The index of the segment register to use for
9052 * this access. The base and limits are checked.
9053 * @param GCPtrMem The address of the guest memory.
9054 */
9055IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9056{
9057 /* The lazy approach for now... */
9058 uint128_t const *pu128Src;
9059 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9060 if (rc == VINF_SUCCESS)
9061 {
9062 *pu128Dst = *pu128Src;
9063 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9064 }
9065 return rc;
9066}
9067
9068
9069#ifdef IEM_WITH_SETJMP
9070/**
9071 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9072 *
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param pu128Dst Where to return the qword.
9075 * @param iSegReg The index of the segment register to use for
9076 * this access. The base and limits are checked.
9077 * @param GCPtrMem The address of the guest memory.
9078 */
9079IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9080{
9081 /* The lazy approach for now... */
9082 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9083 *pu128Dst = *pu128Src;
9084 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9085}
9086#endif
9087
9088
9089/**
9090 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9091 * related.
9092 *
9093 * Raises \#GP(0) if not aligned.
9094 *
9095 * @returns Strict VBox status code.
9096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9097 * @param pu128Dst Where to return the qword.
9098 * @param iSegReg The index of the segment register to use for
9099 * this access. The base and limits are checked.
9100 * @param GCPtrMem The address of the guest memory.
9101 */
9102IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9103{
9104 /* The lazy approach for now... */
9105 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9106 if ( (GCPtrMem & 15)
9107 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9108 return iemRaiseGeneralProtectionFault0(pVCpu);
9109
9110 uint128_t const *pu128Src;
9111 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9112 if (rc == VINF_SUCCESS)
9113 {
9114 *pu128Dst = *pu128Src;
9115 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9116 }
9117 return rc;
9118}
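
/*
 * Illustrative note (informational): with the alignment check above, a fetch
 * at a hypothetical GCPtrMem of 0x1008 has (GCPtrMem & 15) == 8 and therefore
 * raises #GP(0) unless the guest has the MM bit set in MXCSR (X86_MXSCR_MM);
 * an address like 0x1010 is 16-byte aligned and takes the normal map/commit
 * path.
 */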
9119
9120
9121#ifdef IEM_WITH_SETJMP
9122/**
9123 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9124 * related, longjmp on error.
9125 *
9126 * Raises \#GP(0) if not aligned.
9127 *
9128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9129 * @param pu128Dst Where to return the qword.
9130 * @param iSegReg The index of the segment register to use for
9131 * this access. The base and limits are checked.
9132 * @param GCPtrMem The address of the guest memory.
9133 */
9134DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9135{
9136 /* The lazy approach for now... */
9137 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9138 if ( (GCPtrMem & 15) == 0
9139 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9140 {
9141 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9142 IEM_ACCESS_DATA_R);
9143 *pu128Dst = *pu128Src;
9144 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9145 return;
9146 }
9147
9148 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9149 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9150}
9151#endif
9152
9153
9154
9155/**
9156 * Fetches a descriptor register (lgdt, lidt).
9157 *
9158 * @returns Strict VBox status code.
9159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9160 * @param pcbLimit Where to return the limit.
9161 * @param pGCPtrBase Where to return the base.
9162 * @param iSegReg The index of the segment register to use for
9163 * this access. The base and limits are checked.
9164 * @param GCPtrMem The address of the guest memory.
9165 * @param enmOpSize The effective operand size.
9166 */
9167IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9168 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9169{
9170 /*
9171 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9172 * little special:
9173 * - The two reads are done separately.
9174 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9175 * - We suspect the 386 to actually commit the limit before the base in
9176 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9177 * don't try emulate this eccentric behavior, because it's not well
9178 * enough understood and rather hard to trigger.
9179 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9180 */
9181 VBOXSTRICTRC rcStrict;
9182 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9183 {
9184 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9185 if (rcStrict == VINF_SUCCESS)
9186 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9187 }
9188 else
9189 {
9190 uint32_t uTmp;
9191 if (enmOpSize == IEMMODE_32BIT)
9192 {
9193 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9194 {
9195 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9196 if (rcStrict == VINF_SUCCESS)
9197 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9198 }
9199 else
9200 {
9201 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9202 if (rcStrict == VINF_SUCCESS)
9203 {
9204 *pcbLimit = (uint16_t)uTmp;
9205 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9206 }
9207 }
9208 if (rcStrict == VINF_SUCCESS)
9209 *pGCPtrBase = uTmp;
9210 }
9211 else
9212 {
9213 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9214 if (rcStrict == VINF_SUCCESS)
9215 {
9216 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9217 if (rcStrict == VINF_SUCCESS)
9218 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9219 }
9220 }
9221 }
9222 return rcStrict;
9223}
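
/*
 * Worked layout (informational) for the 16-bit operand size case above: the
 * 6-byte descriptor table operand at GCPtrMem is consumed as a 16-bit limit at
 * GCPtrMem+0 followed by a dword at GCPtrMem+2, of which only the low 24 bits
 * are used as the base (uTmp & 0x00ffffff); with a 32-bit operand size the
 * full 32-bit base is kept.
 */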
9224
9225
9226
9227/**
9228 * Stores a data byte.
9229 *
9230 * @returns Strict VBox status code.
9231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9232 * @param iSegReg The index of the segment register to use for
9233 * this access. The base and limits are checked.
9234 * @param GCPtrMem The address of the guest memory.
9235 * @param u8Value The value to store.
9236 */
9237IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9238{
9239 /* The lazy approach for now... */
9240 uint8_t *pu8Dst;
9241 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9242 if (rc == VINF_SUCCESS)
9243 {
9244 *pu8Dst = u8Value;
9245 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9246 }
9247 return rc;
9248}
9249
9250
9251#ifdef IEM_WITH_SETJMP
9252/**
9253 * Stores a data byte, longjmp on error.
9254 *
9255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9256 * @param iSegReg The index of the segment register to use for
9257 * this access. The base and limits are checked.
9258 * @param GCPtrMem The address of the guest memory.
9259 * @param u8Value The value to store.
9260 */
9261IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9262{
9263 /* The lazy approach for now... */
9264 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9265 *pu8Dst = u8Value;
9266 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9267}
9268#endif
9269
9270
9271/**
9272 * Stores a data word.
9273 *
9274 * @returns Strict VBox status code.
9275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9276 * @param iSegReg The index of the segment register to use for
9277 * this access. The base and limits are checked.
9278 * @param GCPtrMem The address of the guest memory.
9279 * @param u16Value The value to store.
9280 */
9281IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9282{
9283 /* The lazy approach for now... */
9284 uint16_t *pu16Dst;
9285 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9286 if (rc == VINF_SUCCESS)
9287 {
9288 *pu16Dst = u16Value;
9289 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9290 }
9291 return rc;
9292}
9293
9294
9295#ifdef IEM_WITH_SETJMP
9296/**
9297 * Stores a data word, longjmp on error.
9298 *
9299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9300 * @param iSegReg The index of the segment register to use for
9301 * this access. The base and limits are checked.
9302 * @param GCPtrMem The address of the guest memory.
9303 * @param u16Value The value to store.
9304 */
9305IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9306{
9307 /* The lazy approach for now... */
9308 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9309 *pu16Dst = u16Value;
9310 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9311}
9312#endif
9313
9314
9315/**
9316 * Stores a data dword.
9317 *
9318 * @returns Strict VBox status code.
9319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9320 * @param iSegReg The index of the segment register to use for
9321 * this access. The base and limits are checked.
9322 * @param GCPtrMem The address of the guest memory.
9323 * @param u32Value The value to store.
9324 */
9325IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9326{
9327 /* The lazy approach for now... */
9328 uint32_t *pu32Dst;
9329 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9330 if (rc == VINF_SUCCESS)
9331 {
9332 *pu32Dst = u32Value;
9333 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9334 }
9335 return rc;
9336}
9337
9338
9339#ifdef IEM_WITH_SETJMP
9340/**
9341 * Stores a data dword, longjmp on error.
9342 *
9344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9345 * @param iSegReg The index of the segment register to use for
9346 * this access. The base and limits are checked.
9347 * @param GCPtrMem The address of the guest memory.
9348 * @param u32Value The value to store.
9349 */
9350IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9351{
9352 /* The lazy approach for now... */
9353 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9354 *pu32Dst = u32Value;
9355 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9356}
9357#endif
9358
9359
9360/**
9361 * Stores a data qword.
9362 *
9363 * @returns Strict VBox status code.
9364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9365 * @param iSegReg The index of the segment register to use for
9366 * this access. The base and limits are checked.
9367 * @param GCPtrMem The address of the guest memory.
9368 * @param u64Value The value to store.
9369 */
9370IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9371{
9372 /* The lazy approach for now... */
9373 uint64_t *pu64Dst;
9374 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9375 if (rc == VINF_SUCCESS)
9376 {
9377 *pu64Dst = u64Value;
9378 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9379 }
9380 return rc;
9381}
9382
9383
9384#ifdef IEM_WITH_SETJMP
9385/**
9386 * Stores a data qword, longjmp on error.
9387 *
9388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9389 * @param iSegReg The index of the segment register to use for
9390 * this access. The base and limits are checked.
9391 * @param GCPtrMem The address of the guest memory.
9392 * @param u64Value The value to store.
9393 */
9394IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9395{
9396 /* The lazy approach for now... */
9397 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9398 *pu64Dst = u64Value;
9399 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9400}
9401#endif
9402
9403
9404/**
9405 * Stores a data dqword.
9406 *
9407 * @returns Strict VBox status code.
9408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9409 * @param iSegReg The index of the segment register to use for
9410 * this access. The base and limits are checked.
9411 * @param GCPtrMem The address of the guest memory.
9412 * @param u128Value The value to store.
9413 */
9414IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9415{
9416 /* The lazy approach for now... */
9417 uint128_t *pu128Dst;
9418 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9419 if (rc == VINF_SUCCESS)
9420 {
9421 *pu128Dst = u128Value;
9422 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9423 }
9424 return rc;
9425}
9426
9427
9428#ifdef IEM_WITH_SETJMP
9429/**
9430 * Stores a data dqword, longjmp on error.
9431 *
9432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9433 * @param iSegReg The index of the segment register to use for
9434 * this access. The base and limits are checked.
9435 * @param GCPtrMem The address of the guest memory.
9436 * @param u128Value The value to store.
9437 */
9438IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9439{
9440 /* The lazy approach for now... */
9441 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9442 *pu128Dst = u128Value;
9443 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9444}
9445#endif
9446
9447
9448/**
9449 * Stores a data dqword, SSE aligned.
9450 *
9451 * @returns Strict VBox status code.
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param iSegReg The index of the segment register to use for
9454 * this access. The base and limits are checked.
9455 * @param GCPtrMem The address of the guest memory.
9456 * @param u128Value The value to store.
9457 */
9458IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9459{
9460 /* The lazy approach for now... */
9461 if ( (GCPtrMem & 15)
9462 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9463 return iemRaiseGeneralProtectionFault0(pVCpu);
9464
9465 uint128_t *pu128Dst;
9466 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9467 if (rc == VINF_SUCCESS)
9468 {
9469 *pu128Dst = u128Value;
9470 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9471 }
9472 return rc;
9473}
9474
9475
9476#ifdef IEM_WITH_SETJMP
9477/**
9478 * Stores a data dqword, SSE aligned, longjmp on error.
9479 *
9481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9482 * @param iSegReg The index of the segment register to use for
9483 * this access. The base and limits are checked.
9484 * @param GCPtrMem The address of the guest memory.
9485 * @param u128Value The value to store.
9486 */
9487DECL_NO_INLINE(IEM_STATIC, void)
9488iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9489{
9490 /* The lazy approach for now... */
9491 if ( (GCPtrMem & 15) == 0
9492 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9493 {
9494 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9495 *pu128Dst = u128Value;
9496 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9497 return;
9498 }
9499
9500 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9501 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9502}
9503#endif
9504
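/*
 * A minimal sketch of the alignment rule enforced by the two SSE-aligned store
 * helpers above: a 16-byte access at an address that is not 16-byte aligned
 * raises \#GP(0) unless MXCSR.MM (AMD's misaligned SSE mode bit) is set.  The
 * helper name and parameters are hypothetical; illustration only, not built.
 */
#if 0
static bool iemExampleSseStoreWouldFault(RTGCPTR GCPtrMem, uint32_t fMxcsr)
{
    return (GCPtrMem & 15) != 0      /* not 16-byte aligned... */
        && !(fMxcsr & X86_MXSCR_MM); /* ...and misaligned mode not enabled. */
}
#endif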
9505
9506/**
9507 * Stores a descriptor register (sgdt, sidt).
9508 *
9509 * @returns Strict VBox status code.
9510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9511 * @param cbLimit The limit.
9512 * @param GCPtrBase The base address.
9513 * @param iSegReg The index of the segment register to use for
9514 * this access. The base and limits are checked.
9515 * @param GCPtrMem The address of the guest memory.
9516 */
9517IEM_STATIC VBOXSTRICTRC
9518iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9519{
9520 /*
9521 * The SIDT and SGDT instructions actually store the data using two
9522 * independent writes.  The instructions do not respond to operand-size prefixes.
9523 */
9524 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9525 if (rcStrict == VINF_SUCCESS)
9526 {
9527 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9528 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9529 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9530 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9531 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9532 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9533 else
9534 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9535 }
9536 return rcStrict;
9537}
9538
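/*
 * An illustrative sketch of the memory image the two writes in
 * iemMemStoreDataXdtr produce for a 32-bit guest; the function name and the
 * limit/base values are made up for illustration and the block is not built.
 */
#if 0
static void iemExampleXdtrImage(uint8_t *pabDst /* at least 6 bytes */)
{
    /* First write: the 16-bit limit (here 0x7fff), little endian. */
    pabDst[0] = 0xff; pabDst[1] = 0x7f;
    /* Second write: the 32-bit base (here 0x00123000); 8 bytes in 64-bit mode,
       and on a 286-class target CPU the top byte would be forced to 0xff. */
    pabDst[2] = 0x00; pabDst[3] = 0x30; pabDst[4] = 0x12; pabDst[5] = 0x00;
}
#endif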
9539
9540/**
9541 * Pushes a word onto the stack.
9542 *
9543 * @returns Strict VBox status code.
9544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9545 * @param u16Value The value to push.
9546 */
9547IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9548{
9549 /* Decrement the stack pointer. */
9550 uint64_t uNewRsp;
9551 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9552 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9553
9554 /* Write the word the lazy way. */
9555 uint16_t *pu16Dst;
9556 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9557 if (rc == VINF_SUCCESS)
9558 {
9559 *pu16Dst = u16Value;
9560 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9561 }
9562
9563 /* Commit the new RSP value unless an access handler made trouble. */
9564 if (rc == VINF_SUCCESS)
9565 pCtx->rsp = uNewRsp;
9566
9567 return rc;
9568}
9569
9570
9571/**
9572 * Pushes a dword onto the stack.
9573 *
9574 * @returns Strict VBox status code.
9575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9576 * @param u32Value The value to push.
9577 */
9578IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9579{
9580 /* Decrement the stack pointer. */
9581 uint64_t uNewRsp;
9582 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9583 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9584
9585 /* Write the dword the lazy way. */
9586 uint32_t *pu32Dst;
9587 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9588 if (rc == VINF_SUCCESS)
9589 {
9590 *pu32Dst = u32Value;
9591 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9592 }
9593
9594 /* Commit the new RSP value unless an access handler made trouble. */
9595 if (rc == VINF_SUCCESS)
9596 pCtx->rsp = uNewRsp;
9597
9598 return rc;
9599}
9600
9601
9602/**
9603 * Pushes a dword segment register value onto the stack.
9604 *
9605 * @returns Strict VBox status code.
9606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9607 * @param u32Value The value to push.
9608 */
9609IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9610{
9611 /* Decrement the stack pointer. */
9612 uint64_t uNewRsp;
9613 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9614 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9615
9616 VBOXSTRICTRC rc;
9617 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9618 {
9619 /* The recompiler writes a full dword. */
9620 uint32_t *pu32Dst;
9621 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9622 if (rc == VINF_SUCCESS)
9623 {
9624 *pu32Dst = u32Value;
9625 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9626 }
9627 }
9628 else
9629 {
9630 /* The Intel docs talk about zero extending the selector register
9631 value. The actual Intel CPU tested here might be zero extending the
9632 value, but it still only writes the lower word... */
9633 /** @todo Test this on newer HW, on AMD, and in 64-bit mode. Also test what
9634 * happens when crossing a page boundary: is the high word checked for
9635 * write accessibility or not? Probably it is. What about segment limits?
9636 * This behavior also appears to be shared with trap error codes.
9637 *
9638 * Docs indicate the behavior changed somewhere around the Pentium or
9639 * Pentium Pro. Check ancient hardware to pin down when it actually changed. */
9640 uint16_t *pu16Dst;
9641 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9642 if (rc == VINF_SUCCESS)
9643 {
9644 *pu16Dst = (uint16_t)u32Value;
9645 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9646 }
9647 }
9648
9649 /* Commit the new RSP value unless an access handler made trouble. */
9650 if (rc == VINF_SUCCESS)
9651 pCtx->rsp = uNewRsp;
9652
9653 return rc;
9654}
9655
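/*
 * A tiny illustration of the word-only path above; the values and the helper
 * are hypothetical (little-endian host assumed, as on x86).  The point is that
 * the high word of the stack slot survives even though RSP moves by a dword.
 */
#if 0
static void iemExamplePushSRegLowWordOnly(void)
{
    uint32_t       uStackSlot = UINT32_C(0xdeadbeef);   /* old contents of the new top-of-stack dword */
    uint16_t const uSel       = UINT16_C(0x0030);       /* hypothetical FS value being pushed */
    *(uint16_t *)&uStackSlot  = uSel;                   /* only the low word is written */
    Assert(uStackSlot == UINT32_C(0xdead0030));         /* the high word is left untouched */
}
#endif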
9656
9657/**
9658 * Pushes a qword onto the stack.
9659 *
9660 * @returns Strict VBox status code.
9661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9662 * @param u64Value The value to push.
9663 */
9664IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9665{
9666 /* Decrement the stack pointer. */
9667 uint64_t uNewRsp;
9668 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9669 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9670
9671 /* Write the qword the lazy way. */
9672 uint64_t *pu64Dst;
9673 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9674 if (rc == VINF_SUCCESS)
9675 {
9676 *pu64Dst = u64Value;
9677 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9678 }
9679
9680 /* Commit the new RSP value unless an access handler made trouble. */
9681 if (rc == VINF_SUCCESS)
9682 pCtx->rsp = uNewRsp;
9683
9684 return rc;
9685}
9686
9687
9688/**
9689 * Pops a word from the stack.
9690 *
9691 * @returns Strict VBox status code.
9692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9693 * @param pu16Value Where to store the popped value.
9694 */
9695IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9696{
9697 /* Increment the stack pointer. */
9698 uint64_t uNewRsp;
9699 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9700 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9701
9702 /* Read the word the lazy way. */
9703 uint16_t const *pu16Src;
9704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9705 if (rc == VINF_SUCCESS)
9706 {
9707 *pu16Value = *pu16Src;
9708 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9709
9710 /* Commit the new RSP value. */
9711 if (rc == VINF_SUCCESS)
9712 pCtx->rsp = uNewRsp;
9713 }
9714
9715 return rc;
9716}
9717
9718
9719/**
9720 * Pops a dword from the stack.
9721 *
9722 * @returns Strict VBox status code.
9723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9724 * @param pu32Value Where to store the popped value.
9725 */
9726IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9727{
9728 /* Increment the stack pointer. */
9729 uint64_t uNewRsp;
9730 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9731 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9732
9733 /* Read the dword the lazy way. */
9734 uint32_t const *pu32Src;
9735 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9736 if (rc == VINF_SUCCESS)
9737 {
9738 *pu32Value = *pu32Src;
9739 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9740
9741 /* Commit the new RSP value. */
9742 if (rc == VINF_SUCCESS)
9743 pCtx->rsp = uNewRsp;
9744 }
9745
9746 return rc;
9747}
9748
9749
9750/**
9751 * Pops a qword from the stack.
9752 *
9753 * @returns Strict VBox status code.
9754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9755 * @param pu64Value Where to store the popped value.
9756 */
9757IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9758{
9759 /* Increment the stack pointer. */
9760 uint64_t uNewRsp;
9761 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9762 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9763
9764 /* Read the qword the lazy way. */
9765 uint64_t const *pu64Src;
9766 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9767 if (rc == VINF_SUCCESS)
9768 {
9769 *pu64Value = *pu64Src;
9770 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9771
9772 /* Commit the new RSP value. */
9773 if (rc == VINF_SUCCESS)
9774 pCtx->rsp = uNewRsp;
9775 }
9776
9777 return rc;
9778}
9779
9780
9781/**
9782 * Pushes a word onto the stack, using a temporary stack pointer.
9783 *
9784 * @returns Strict VBox status code.
9785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9786 * @param u16Value The value to push.
9787 * @param pTmpRsp Pointer to the temporary stack pointer.
9788 */
9789IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9790{
9791 /* Decrement the stack pointer. */
9792 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9793 RTUINT64U NewRsp = *pTmpRsp;
9794 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9795
9796 /* Write the word the lazy way. */
9797 uint16_t *pu16Dst;
9798 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9799 if (rc == VINF_SUCCESS)
9800 {
9801 *pu16Dst = u16Value;
9802 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9803 }
9804
9805 /* Commit the new RSP value unless an access handler made trouble. */
9806 if (rc == VINF_SUCCESS)
9807 *pTmpRsp = NewRsp;
9808
9809 return rc;
9810}
9811
9812
9813/**
9814 * Pushes a dword onto the stack, using a temporary stack pointer.
9815 *
9816 * @returns Strict VBox status code.
9817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9818 * @param u32Value The value to push.
9819 * @param pTmpRsp Pointer to the temporary stack pointer.
9820 */
9821IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9822{
9823 /* Decrement the stack pointer. */
9824 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9825 RTUINT64U NewRsp = *pTmpRsp;
9826 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9827
9828 /* Write the dword the lazy way. */
9829 uint32_t *pu32Dst;
9830 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9831 if (rc == VINF_SUCCESS)
9832 {
9833 *pu32Dst = u32Value;
9834 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9835 }
9836
9837 /* Commit the new RSP value unless an access handler made trouble. */
9838 if (rc == VINF_SUCCESS)
9839 *pTmpRsp = NewRsp;
9840
9841 return rc;
9842}
9843
9844
9845/**
9846 * Pushes a qword onto the stack, using a temporary stack pointer.
9847 *
9848 * @returns Strict VBox status code.
9849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9850 * @param u64Value The value to push.
9851 * @param pTmpRsp Pointer to the temporary stack pointer.
9852 */
9853IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9854{
9855 /* Decrement the stack pointer. */
9856 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9857 RTUINT64U NewRsp = *pTmpRsp;
9858 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9859
9860 /* Write the qword the lazy way. */
9861 uint64_t *pu64Dst;
9862 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9863 if (rc == VINF_SUCCESS)
9864 {
9865 *pu64Dst = u64Value;
9866 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9867 }
9868
9869 /* Commit the new RSP value unless an access handler made trouble. */
9870 if (rc == VINF_SUCCESS)
9871 *pTmpRsp = NewRsp;
9872
9873 return rc;
9874}
9875
9876
9877/**
9878 * Pops a word from the stack, using a temporary stack pointer.
9879 *
9880 * @returns Strict VBox status code.
9881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9882 * @param pu16Value Where to store the popped value.
9883 * @param pTmpRsp Pointer to the temporary stack pointer.
9884 */
9885IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9886{
9887 /* Increment the stack pointer. */
9888 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9889 RTUINT64U NewRsp = *pTmpRsp;
9890 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9891
9892 /* Read the word the lazy way. */
9893 uint16_t const *pu16Src;
9894 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9895 if (rc == VINF_SUCCESS)
9896 {
9897 *pu16Value = *pu16Src;
9898 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9899
9900 /* Commit the new RSP value. */
9901 if (rc == VINF_SUCCESS)
9902 *pTmpRsp = NewRsp;
9903 }
9904
9905 return rc;
9906}
9907
9908
9909/**
9910 * Pops a dword from the stack, using a temporary stack pointer.
9911 *
9912 * @returns Strict VBox status code.
9913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9914 * @param pu32Value Where to store the popped value.
9915 * @param pTmpRsp Pointer to the temporary stack pointer.
9916 */
9917IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9918{
9919 /* Increment the stack pointer. */
9920 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9921 RTUINT64U NewRsp = *pTmpRsp;
9922 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9923
9924 /* Read the dword the lazy way. */
9925 uint32_t const *pu32Src;
9926 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9927 if (rc == VINF_SUCCESS)
9928 {
9929 *pu32Value = *pu32Src;
9930 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9931
9932 /* Commit the new RSP value. */
9933 if (rc == VINF_SUCCESS)
9934 *pTmpRsp = NewRsp;
9935 }
9936
9937 return rc;
9938}
9939
9940
9941/**
9942 * Pops a qword from the stack, using a temporary stack pointer.
9943 *
9944 * @returns Strict VBox status code.
9945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9946 * @param pu64Value Where to store the popped value.
9947 * @param pTmpRsp Pointer to the temporary stack pointer.
9948 */
9949IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9950{
9951 /* Increment the stack pointer. */
9952 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9953 RTUINT64U NewRsp = *pTmpRsp;
9954 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9955
9956 /* Read the qword the lazy way. */
9957 uint64_t const *pu64Src;
9958 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9959 if (rcStrict == VINF_SUCCESS)
9960 {
9961 *pu64Value = *pu64Src;
9962 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9963
9964 /* Commit the new RSP value. */
9965 if (rcStrict == VINF_SUCCESS)
9966 *pTmpRsp = NewRsp;
9967 }
9968
9969 return rcStrict;
9970}
9971
9972
9973/**
9974 * Begins a special stack push (used by interrupts, exceptions and such).
9975 *
9976 * This will raise \#SS or \#PF if appropriate.
9977 *
9978 * @returns Strict VBox status code.
9979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9980 * @param cbMem The number of bytes to push onto the stack.
9981 * @param ppvMem Where to return the pointer to the stack memory.
9982 * As with the other memory functions, this can be
9983 * direct access or bounce-buffered access, so
9984 * don't commit any register state until the
9985 * commit call succeeds.
9986 * @param puNewRsp Where to return the new RSP value. This must be
9987 * passed unchanged to
9988 * iemMemStackPushCommitSpecial().
9989 */
9990IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
9991{
9992 Assert(cbMem < UINT8_MAX);
9993 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9994 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
9995 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9996}
9997
9998
9999/**
10000 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10001 *
10002 * This will update the rSP.
10003 *
10004 * @returns Strict VBox status code.
10005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10006 * @param pvMem The pointer returned by
10007 * iemMemStackPushBeginSpecial().
10008 * @param uNewRsp The new RSP value returned by
10009 * iemMemStackPushBeginSpecial().
10010 */
10011IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10012{
10013 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10014 if (rcStrict == VINF_SUCCESS)
10015 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10016 return rcStrict;
10017}
10018
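/*
 * A minimal usage sketch of the begin/commit pair above.  The caller, the
 * 8-byte frame size and its layout are hypothetical; the point is that the
 * frame is filled through the mapped pointer and RSP is only committed once
 * the unmap succeeds.  Illustration only, not built.
 */
#if 0
static VBOXSTRICTRC iemExampleSpecialPush(PVMCPU pVCpu, uint32_t uEip, uint32_t uErr)
{
    void        *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                        /* #SS/#PF has already been raised. */
    ((uint32_t *)pvFrame)[0] = uEip;            /* fill the frame via the returned pointer */
    ((uint32_t *)pvFrame)[1] = uErr;
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp); /* unmaps and updates RSP */
}
#endif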
10019
10020/**
10021 * Begins a special stack pop (used by iret, retf and such).
10022 *
10023 * This will raise \#SS or \#PF if appropriate.
10024 *
10025 * @returns Strict VBox status code.
10026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10027 * @param cbMem The number of bytes to pop from the stack.
10028 * @param ppvMem Where to return the pointer to the stack memory.
10029 * @param puNewRsp Where to return the new RSP value. This must be
10030 * assigned to CPUMCTX::rsp manually some time
10031 * after iemMemStackPopDoneSpecial() has been
10032 * called.
10033 */
10034IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10035{
10036 Assert(cbMem < UINT8_MAX);
10037 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10038 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10039 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10040}
10041
10042
10043/**
10044 * Continues a special stack pop (used by iret and retf).
10045 *
10046 * This will raise \#SS or \#PF if appropriate.
10047 *
10048 * @returns Strict VBox status code.
10049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10050 * @param cbMem The number of bytes to pop from the stack.
10051 * @param ppvMem Where to return the pointer to the stack memory.
10052 * @param puNewRsp Where to return the new RSP value. This must be
10053 * assigned to CPUMCTX::rsp manually some time
10054 * after iemMemStackPopDoneSpecial() has been
10055 * called.
10056 */
10057IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10058{
10059 Assert(cbMem < UINT8_MAX);
10060 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10061 RTUINT64U NewRsp;
10062 NewRsp.u = *puNewRsp;
10063 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10064 *puNewRsp = NewRsp.u;
10065 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10066}
10067
10068
10069/**
10070 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10071 * iemMemStackPopContinueSpecial).
10072 *
10073 * The caller will manually commit the rSP.
10074 *
10075 * @returns Strict VBox status code.
10076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10077 * @param pvMem The pointer returned by
10078 * iemMemStackPopBeginSpecial() or
10079 * iemMemStackPopContinueSpecial().
10080 */
10081IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10082{
10083 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10084}
10085
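/*
 * A minimal usage sketch of the special pop helpers above, with a hypothetical
 * caller and frame layout; it shows the manual RSP commit that the doc
 * comments require after iemMemStackPopDoneSpecial().  Illustration only.
 */
#if 0
static VBOXSTRICTRC iemExampleSpecialPop(PVMCPU pVCpu, uint32_t *puEip, uint32_t *puCs)
{
    void const  *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puEip = ((uint32_t const *)pvFrame)[0];    /* read the frame before unmapping */
    *puCs  = ((uint32_t const *)pvFrame)[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;      /* the manual RSP commit */
    return rcStrict;
}
#endif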
10086
10087/**
10088 * Fetches a system table byte.
10089 *
10090 * @returns Strict VBox status code.
10091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10092 * @param pbDst Where to return the byte.
10093 * @param iSegReg The index of the segment register to use for
10094 * this access. The base and limits are checked.
10095 * @param GCPtrMem The address of the guest memory.
10096 */
10097IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10098{
10099 /* The lazy approach for now... */
10100 uint8_t const *pbSrc;
10101 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10102 if (rc == VINF_SUCCESS)
10103 {
10104 *pbDst = *pbSrc;
10105 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10106 }
10107 return rc;
10108}
10109
10110
10111/**
10112 * Fetches a system table word.
10113 *
10114 * @returns Strict VBox status code.
10115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10116 * @param pu16Dst Where to return the word.
10117 * @param iSegReg The index of the segment register to use for
10118 * this access. The base and limits are checked.
10119 * @param GCPtrMem The address of the guest memory.
10120 */
10121IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10122{
10123 /* The lazy approach for now... */
10124 uint16_t const *pu16Src;
10125 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10126 if (rc == VINF_SUCCESS)
10127 {
10128 *pu16Dst = *pu16Src;
10129 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10130 }
10131 return rc;
10132}
10133
10134
10135/**
10136 * Fetches a system table dword.
10137 *
10138 * @returns Strict VBox status code.
10139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10140 * @param pu32Dst Where to return the dword.
10141 * @param iSegReg The index of the segment register to use for
10142 * this access. The base and limits are checked.
10143 * @param GCPtrMem The address of the guest memory.
10144 */
10145IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10146{
10147 /* The lazy approach for now... */
10148 uint32_t const *pu32Src;
10149 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10150 if (rc == VINF_SUCCESS)
10151 {
10152 *pu32Dst = *pu32Src;
10153 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10154 }
10155 return rc;
10156}
10157
10158
10159/**
10160 * Fetches a system table qword.
10161 *
10162 * @returns Strict VBox status code.
10163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10164 * @param pu64Dst Where to return the qword.
10165 * @param iSegReg The index of the segment register to use for
10166 * this access. The base and limits are checked.
10167 * @param GCPtrMem The address of the guest memory.
10168 */
10169IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10170{
10171 /* The lazy approach for now... */
10172 uint64_t const *pu64Src;
10173 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10174 if (rc == VINF_SUCCESS)
10175 {
10176 *pu64Dst = *pu64Src;
10177 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10178 }
10179 return rc;
10180}
10181
10182
10183/**
10184 * Fetches a descriptor table entry with a caller-specified error code.
10185 *
10186 * @returns Strict VBox status code.
10187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10188 * @param pDesc Where to return the descriptor table entry.
10189 * @param uSel The selector which table entry to fetch.
10190 * @param uXcpt The exception to raise on table lookup error.
10191 * @param uErrorCode The error code associated with the exception.
10192 */
10193IEM_STATIC VBOXSTRICTRC
10194iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10195{
10196 AssertPtr(pDesc);
10197 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10198
10199 /** @todo did the 286 require all 8 bytes to be accessible? */
10200 /*
10201 * Get the selector table base and check bounds.
10202 */
10203 RTGCPTR GCPtrBase;
10204 if (uSel & X86_SEL_LDT)
10205 {
10206 if ( !pCtx->ldtr.Attr.n.u1Present
10207 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10208 {
10209 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10210 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10211 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10212 uErrorCode, 0);
10213 }
10214
10215 Assert(pCtx->ldtr.Attr.n.u1Present);
10216 GCPtrBase = pCtx->ldtr.u64Base;
10217 }
10218 else
10219 {
10220 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10221 {
10222 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10223 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10224 uErrorCode, 0);
10225 }
10226 GCPtrBase = pCtx->gdtr.pGdt;
10227 }
10228
10229 /*
10230 * Read the legacy descriptor and maybe the long mode extensions if
10231 * required.
10232 */
10233 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10234 if (rcStrict == VINF_SUCCESS)
10235 {
10236 if ( !IEM_IS_LONG_MODE(pVCpu)
10237 || pDesc->Legacy.Gen.u1DescType)
10238 pDesc->Long.au64[1] = 0;
10239 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10240 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10241 else
10242 {
10243 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10244 /** @todo is this the right exception? */
10245 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10246 }
10247 }
10248 return rcStrict;
10249}
10250
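/*
 * A small sketch of how the selector bits are consumed above.  For a
 * hypothetical uSel = 0x002b: RPL = 3, the table indicator bit (X86_SEL_LDT)
 * is clear so the GDT is used, and uSel & X86_SEL_MASK = 0x0028 is the byte
 * offset of the 8-byte descriptor within that table.  Illustration only.
 */
#if 0
static RTGCPTR iemExampleSelDescAddr(PCPUMCTX pCtx, uint16_t uSel)
{
    RTGCPTR GCPtrBase = (uSel & X86_SEL_LDT) ? pCtx->ldtr.u64Base : pCtx->gdtr.pGdt;
    return GCPtrBase + (uSel & X86_SEL_MASK);
}
#endif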
10251
10252/**
10253 * Fetches a descriptor table entry.
10254 *
10255 * @returns Strict VBox status code.
10256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10257 * @param pDesc Where to return the descriptor table entry.
10258 * @param uSel The selector which table entry to fetch.
10259 * @param uXcpt The exception to raise on table lookup error.
10260 */
10261IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10262{
10263 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10264}
10265
10266
10267/**
10268 * Fakes a long mode stack segment descriptor for SS = 0.
10269 *
10270 * @param pDescSs Where to return the fake stack descriptor.
10271 * @param uDpl The DPL we want.
10272 */
10273IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10274{
10275 pDescSs->Long.au64[0] = 0;
10276 pDescSs->Long.au64[1] = 0;
10277 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10278 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10279 pDescSs->Long.Gen.u2Dpl = uDpl;
10280 pDescSs->Long.Gen.u1Present = 1;
10281 pDescSs->Long.Gen.u1Long = 1;
10282}
10283
10284
10285/**
10286 * Marks the selector descriptor as accessed (only non-system descriptors).
10287 *
10288 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10289 * will therefore skip the limit checks.
10290 *
10291 * @returns Strict VBox status code.
10292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10293 * @param uSel The selector.
10294 */
10295IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10296{
10297 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10298
10299 /*
10300 * Get the selector table base and calculate the entry address.
10301 */
10302 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10303 ? pCtx->ldtr.u64Base
10304 : pCtx->gdtr.pGdt;
10305 GCPtr += uSel & X86_SEL_MASK;
10306
10307 /*
10308 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10309 * ugly stuff to avoid that. This also makes sure the access is atomic and
10310 * more or less removes any question about 8-bit vs 32-bit accesses.
10311 */
10312 VBOXSTRICTRC rcStrict;
10313 uint32_t volatile *pu32;
10314 if ((GCPtr & 3) == 0)
10315 {
10316 /* The normal case: map the 32 bits surrounding the accessed bit (bit 40). */
10317 GCPtr += 2 + 2;
10318 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10319 if (rcStrict != VINF_SUCCESS)
10320 return rcStrict;
10321 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10322 }
10323 else
10324 {
10325 /* The misaligned GDT/LDT case, map the whole thing. */
10326 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10327 if (rcStrict != VINF_SUCCESS)
10328 return rcStrict;
10329 switch ((uintptr_t)pu32 & 3)
10330 {
10331 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10332 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10333 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10334 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10335 }
10336 }
10337
10338 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10339}
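
/*
 * A worked example of the misaligned case above for a mapping whose host
 * address has (addr & 3) == 2; the constants are illustrative and the block
 * is not part of the build.
 */
#if 0
static void iemExampleAccessedBitOffset(void)
{
    uintptr_t const uHostAddr = 0x1002;                 /* pretend (uintptr_t)pu32 & 3 == 2 */
    unsigned  const cbToAlign = 4 - (uHostAddr & 3);    /* bytes up to the next dword boundary */
    unsigned  const iBit      = 40 - cbToAlign * 8;     /* the accessed bit, rebased to that dword */
    Assert(cbToAlign == 2 && iBit == 24);               /* matches the 'case 2' entry above */
}
#endif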
10340
10341/** @} */
10342
10343
10344/*
10345 * Include the C/C++ implementation of the instructions.
10346 */
10347#include "IEMAllCImpl.cpp.h"
10348
10349
10350
10351/** @name "Microcode" macros.
10352 *
10353 * The idea is that we should be able to use the same code to interpret
10354 * instructions as well as to feed a recompiler. Thus this obfuscation.
10355 *
10356 * @{
10357 */
10358#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10359#define IEM_MC_END() }
10360#define IEM_MC_PAUSE() do {} while (0)
10361#define IEM_MC_CONTINUE() do {} while (0)
10362
10363/** Internal macro. */
10364#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10365 do \
10366 { \
10367 VBOXSTRICTRC rcStrict2 = a_Expr; \
10368 if (rcStrict2 != VINF_SUCCESS) \
10369 return rcStrict2; \
10370 } while (0)
10371
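/*
 * A minimal sketch of how the decoder code composes these IEM_MC_* "microcode"
 * statements, here for a register-form MOV r32,r32.  iGRegDst and iGRegSrc are
 * assumed to come from ModR/M decoding; the block is illustrative only and not
 * part of the build (the macros it uses are defined further down).
 */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint32_t, u32Value);
    IEM_MC_FETCH_GREG_U32(u32Value, iGRegSrc);  /* read the source register */
    IEM_MC_STORE_GREG_U32(iGRegDst, u32Value);  /* store it; also clears the high half */
    IEM_MC_ADVANCE_RIP();                       /* step RIP past the instruction and clear RF */
    IEM_MC_END();
#endif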
10372
10373#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10374#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10375#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10376#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10377#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10378#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10379#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10380#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10381#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10382 do { \
10383 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10384 return iemRaiseDeviceNotAvailable(pVCpu); \
10385 } while (0)
10386#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10387 do { \
10388 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10389 return iemRaiseMathFault(pVCpu); \
10390 } while (0)
10391#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10392 do { \
10393 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10394 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10395 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10396 return iemRaiseUndefinedOpcode(pVCpu); \
10397 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10398 return iemRaiseDeviceNotAvailable(pVCpu); \
10399 } while (0)
10400#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10401 do { \
10402 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10403 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10404 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10405 return iemRaiseUndefinedOpcode(pVCpu); \
10406 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10407 return iemRaiseDeviceNotAvailable(pVCpu); \
10408 } while (0)
10409#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10410 do { \
10411 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10412 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10413 return iemRaiseUndefinedOpcode(pVCpu); \
10414 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10415 return iemRaiseDeviceNotAvailable(pVCpu); \
10416 } while (0)
10417#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10418 do { \
10419 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10420 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10421 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10422 return iemRaiseUndefinedOpcode(pVCpu); \
10423 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10424 return iemRaiseDeviceNotAvailable(pVCpu); \
10425 } while (0)
10426#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10427 do { \
10428 if (pVCpu->iem.s.uCpl != 0) \
10429 return iemRaiseGeneralProtectionFault0(pVCpu); \
10430 } while (0)
10431
10432
10433#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10434#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10435#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10436#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10437#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10438#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10439#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10440 uint32_t a_Name; \
10441 uint32_t *a_pName = &a_Name
10442#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10443 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10444
10445#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10446#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10447
10448#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10449#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10450#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10451#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10452#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10453#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10454#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10455#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10456#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10457#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10458#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10459#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10460#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10461#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10462#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10463#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10464#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10465#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10466#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10467#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10468#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10469#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10470#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10471#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10472#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10473#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10474#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10475#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10476#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10477/** @note Not for IOPL or IF testing or modification. */
10478#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10479#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10480#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10481#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10482
10483#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10484#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10485#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10486#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10487#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10488#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10489#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10490#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10491#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10492#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10493#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10494 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10495
10496#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10497#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10498/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10499 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10500#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10501#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10502/** @note Not for IOPL or IF testing or modification. */
10503#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10504
10505#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10506#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10507#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10508 do { \
10509 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10510 *pu32Reg += (a_u32Value); \
10511 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10512 } while (0)
10513#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10514
10515#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10516#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10517#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10518 do { \
10519 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10520 *pu32Reg -= (a_u32Value); \
10521 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10522 } while (0)
10523#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10524#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10525
10526#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10527#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10528#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10529#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10530#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10531#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10532#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10533
10534#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10535#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10536#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10537#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10538
10539#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10540#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10541#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10542
10543#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10544#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10545#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10546
10547#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10548#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10549#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10550
10551#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10552#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10553#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10554
10555#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10556
10557#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10558
10559#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10560#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10561#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10562 do { \
10563 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10564 *pu32Reg &= (a_u32Value); \
10565 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10566 } while (0)
10567#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10568
10569#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10570#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10571#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10572 do { \
10573 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10574 *pu32Reg |= (a_u32Value); \
10575 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10576 } while (0)
10577#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10578
10579
10580/** @note Not for IOPL or IF modification. */
10581#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10582/** @note Not for IOPL or IF modification. */
10583#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10584/** @note Not for IOPL or IF modification. */
10585#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10586
10587#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10588
10589
10590#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10591 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10592#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10593 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10594#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10595 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10596#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10597 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10598#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10599 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10600#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10601 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10602#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10603 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10604
10605#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10606 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10607#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10608 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10609#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10610 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10611#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10612 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10613#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10614 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10615#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10616 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10617 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10618 } while (0)
10619#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10620 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10621 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10622 } while (0)
10623#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10624 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10625#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10626 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10627#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10628 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10629#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10630 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10631 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10632
10633#ifndef IEM_WITH_SETJMP
10634# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10636# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10637 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10638# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10639 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10640#else
10641# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10642 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10643# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10644 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10645# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10646 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10647#endif
10648
10649#ifndef IEM_WITH_SETJMP
10650# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10651 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10652# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10653 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10654# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10655 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10656#else
10657# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10658 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10659# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10660 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10661# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10662 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10663#endif
10664
10665#ifndef IEM_WITH_SETJMP
10666# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10667 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10668# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10670# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10672#else
10673# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10674 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10675# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10676 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10677# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10678 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10679#endif
10680
10681#ifdef SOME_UNUSED_FUNCTION
10682# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10684#endif
10685
10686#ifndef IEM_WITH_SETJMP
10687# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10689# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10691# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10693# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10695#else
10696# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10697 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10698# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10699 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10700# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10701 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10702# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10703 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10704#endif
10705
10706#ifndef IEM_WITH_SETJMP
10707# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10709# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10711# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10713#else
10714# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10715 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10716# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10717 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10718# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10719 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10720#endif
10721
10722#ifndef IEM_WITH_SETJMP
10723# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10725# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10727#else
10728# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10729 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10730# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10731 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10732#endif
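
/*
 * Illustrative sketch only (not part of the decoder tables): a typical
 * memory-form instruction body uses the fetch macros above together with the
 * IEM_MC_* framework macros defined elsewhere in this file (IEM_MC_BEGIN,
 * IEM_MC_LOCAL, IEM_MC_CALC_RM_EFF_ADDR further down, IEM_MC_STORE_GREG_U32,
 * IEM_MC_ADVANCE_RIP, IEM_MC_END).  The register indexing assumes a
 * previously decoded ModR/M byte in bRm; roughly the memory path of a
 * 'mov Gv,Ev' style handler with 32-bit operand size.
 *
 * @code
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_FETCH_MEM_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */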
10733
10734
10735
10736#ifndef IEM_WITH_SETJMP
10737# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10738 do { \
10739 uint8_t u8Tmp; \
10740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10741 (a_u16Dst) = u8Tmp; \
10742 } while (0)
10743# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10744 do { \
10745 uint8_t u8Tmp; \
10746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10747 (a_u32Dst) = u8Tmp; \
10748 } while (0)
10749# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10750 do { \
10751 uint8_t u8Tmp; \
10752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10753 (a_u64Dst) = u8Tmp; \
10754 } while (0)
10755# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10756 do { \
10757 uint16_t u16Tmp; \
10758 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10759 (a_u32Dst) = u16Tmp; \
10760 } while (0)
10761# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10762 do { \
10763 uint16_t u16Tmp; \
10764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10765 (a_u64Dst) = u16Tmp; \
10766 } while (0)
10767# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10768 do { \
10769 uint32_t u32Tmp; \
10770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10771 (a_u64Dst) = u32Tmp; \
10772 } while (0)
10773#else /* IEM_WITH_SETJMP */
10774# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10775 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10776# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10777 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10778# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10779 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10780# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10781 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10782# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10783 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10784# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10785 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10786#endif /* IEM_WITH_SETJMP */
10787
10788#ifndef IEM_WITH_SETJMP
10789# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10790 do { \
10791 uint8_t u8Tmp; \
10792 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10793 (a_u16Dst) = (int8_t)u8Tmp; \
10794 } while (0)
10795# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10796 do { \
10797 uint8_t u8Tmp; \
10798 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10799 (a_u32Dst) = (int8_t)u8Tmp; \
10800 } while (0)
10801# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10802 do { \
10803 uint8_t u8Tmp; \
10804 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10805 (a_u64Dst) = (int8_t)u8Tmp; \
10806 } while (0)
10807# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10808 do { \
10809 uint16_t u16Tmp; \
10810 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10811 (a_u32Dst) = (int16_t)u16Tmp; \
10812 } while (0)
10813# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10814 do { \
10815 uint16_t u16Tmp; \
10816 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10817 (a_u64Dst) = (int16_t)u16Tmp; \
10818 } while (0)
10819# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10820 do { \
10821 uint32_t u32Tmp; \
10822 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10823 (a_u64Dst) = (int32_t)u32Tmp; \
10824 } while (0)
10825#else /* IEM_WITH_SETJMP */
10826# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10827 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10828# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10829 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10830# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10831 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10832# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10833 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10834# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10835 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10836# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10837 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10838#endif /* IEM_WITH_SETJMP */
10839
10840#ifndef IEM_WITH_SETJMP
10841# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10842 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10843# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10844 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10845# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10846 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10847# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10848 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10849#else
10850# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10851 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10852# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10853 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10854# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10855 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10856# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10857 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10858#endif
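
/*
 * Illustrative sketch only: the complementary store direction ('mov Ev,Gv'
 * style memory path), again assuming a previously decoded ModR/M byte in bRm
 * and the usual IEM_MC_* framework macros from elsewhere in this file.
 *
 * @code
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */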
10859
10860#ifndef IEM_WITH_SETJMP
10861# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10862 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10863# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10864 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10865# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10866 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10867# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10868 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10869#else
10870# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10871 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10872# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10873 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10874# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10875 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10876# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10877 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10878#endif
10879
10880#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10881#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10882#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10883#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10884#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10885#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10886#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10887 do { \
10888 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10889 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10890 } while (0)
10891
10892#ifndef IEM_WITH_SETJMP
10893# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10894 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10895# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10896 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10897#else
10898# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10899 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10900# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10901 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10902#endif
10903
10904
10905#define IEM_MC_PUSH_U16(a_u16Value) \
10906 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10907#define IEM_MC_PUSH_U32(a_u32Value) \
10908 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10909#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10910 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10911#define IEM_MC_PUSH_U64(a_u64Value) \
10912 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10913
10914#define IEM_MC_POP_U16(a_pu16Value) \
10915 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10916#define IEM_MC_POP_U32(a_pu32Value) \
10917 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10918#define IEM_MC_POP_U64(a_pu64Value) \
10919 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
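
/*
 * Illustrative sketch only: a 16-bit operand size push of a general register,
 * roughly what a 'push rAX' style handler does for that operand size.  The
 * push/pop helpers take care of the stack pointer update and any faults; note
 * that the pop variants take a pointer to the destination rather than a value.
 *
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */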
10920
10921/** Maps guest memory for direct or bounce buffered access.
10922 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10923 * @remarks May return.
10924 */
10925#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10926 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10927
10928/** Maps guest memory for direct or bounce buffered access.
10929 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10930 * @remarks May return.
10931 */
10932#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10933 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10934
10935/** Commits the memory and unmaps the guest memory.
10936 * @remarks May return.
10937 */
10938#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10939 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
10940
10941/** Commits the memory and unmaps the guest memory, unless the FPU status word
10942 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
10943 * exception that would prevent the value from being stored.
10944 *
10945 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10946 * store, while \#P will not.
10947 *
10948 * @remarks May in theory return - for now.
10949 */
10950#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10951 do { \
10952 if ( !(a_u16FSW & X86_FSW_ES) \
10953 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
10954 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
10955 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
10956 } while (0)
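
/*
 * Illustrative sketch only: the map/commit pair is what read-modify-write
 * operands use, so the modified value is written back (or bounce buffered) in
 * a single commit.  Roughly the memory path of an 'and Ev,Gv' style handler;
 * bRm is the previously decoded ModR/M byte and iemAImpl_and_u32 one of the
 * assembly worker routines.
 *
 * @code
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint32_t *,      pu32Dst,         0);
 *      IEM_MC_ARG(uint32_t,        u32Src,          1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_and_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */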
10957
10958/** Calculates the effective address from the ModR/M encoding. */
10959#ifndef IEM_WITH_SETJMP
10960# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10961 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
10962#else
10963# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10964 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
10965#endif
10966
10967#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
10968#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
10969#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
10970#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
10971#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
10972#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
10973#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
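
/*
 * Illustrative sketch only: the register-only counterpart of the above, which
 * invokes an assembly worker (iemAImpl_add_u32 here) through
 * IEM_MC_CALL_VOID_AIMPL_3 with a destination reference, a source value and a
 * reference to EFLAGS.
 *
 * @code
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *      IEM_MC_ARG(uint32_t,   u32Src,  1);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *      IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */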
10974
10975/**
10976 * Defers the rest of the instruction emulation to a C implementation routine
10977 * and returns, only taking the standard parameters.
10978 *
10979 * @param a_pfnCImpl The pointer to the C routine.
10980 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10981 */
10982#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10983
10984/**
10985 * Defers the rest of instruction emulation to a C implementation routine and
10986 * returns, taking one argument in addition to the standard ones.
10987 *
10988 * @param a_pfnCImpl The pointer to the C routine.
10989 * @param a0 The argument.
10990 */
10991#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
10992
10993/**
10994 * Defers the rest of the instruction emulation to a C implementation routine
10995 * and returns, taking two arguments in addition to the standard ones.
10996 *
10997 * @param a_pfnCImpl The pointer to the C routine.
10998 * @param a0 The first extra argument.
10999 * @param a1 The second extra argument.
11000 */
11001#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11002
11003/**
11004 * Defers the rest of the instruction emulation to a C implementation routine
11005 * and returns, taking three arguments in addition to the standard ones.
11006 *
11007 * @param a_pfnCImpl The pointer to the C routine.
11008 * @param a0 The first extra argument.
11009 * @param a1 The second extra argument.
11010 * @param a2 The third extra argument.
11011 */
11012#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11013
11014/**
11015 * Defers the rest of the instruction emulation to a C implementation routine
11016 * and returns, taking four arguments in addition to the standard ones.
11017 *
11018 * @param a_pfnCImpl The pointer to the C routine.
11019 * @param a0 The first extra argument.
11020 * @param a1 The second extra argument.
11021 * @param a2 The third extra argument.
11022 * @param a3 The fourth extra argument.
11023 */
11024#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11025
11026/**
11027 * Defers the rest of the instruction emulation to a C implementation routine
11028 * and returns, taking five arguments in addition to the standard ones.
11029 *
11030 * @param a_pfnCImpl The pointer to the C routine.
11031 * @param a0 The first extra argument.
11032 * @param a1 The second extra argument.
11033 * @param a2 The third extra argument.
11034 * @param a3 The fourth extra argument.
11035 * @param a4 The fifth extra argument.
11036 */
11037#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11038
11039/**
11040 * Defers the entire instruction emulation to a C implementation routine and
11041 * returns, only taking the standard parameters.
11042 *
11043 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11044 *
11045 * @param a_pfnCImpl The pointer to the C routine.
11046 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11047 */
11048#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11049
11050/**
11051 * Defers the entire instruction emulation to a C implementation routine and
11052 * returns, taking one argument in addition to the standard ones.
11053 *
11054 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11055 *
11056 * @param a_pfnCImpl The pointer to the C routine.
11057 * @param a0 The argument.
11058 */
11059#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11060
11061/**
11062 * Defers the entire instruction emulation to a C implementation routine and
11063 * returns, taking two arguments in addition to the standard ones.
11064 *
11065 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11066 *
11067 * @param a_pfnCImpl The pointer to the C routine.
11068 * @param a0 The first extra argument.
11069 * @param a1 The second extra argument.
11070 */
11071#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11072
11073/**
11074 * Defers the entire instruction emulation to a C implementation routine and
11075 * returns, taking three arguments in addition to the standard ones.
11076 *
11077 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11078 *
11079 * @param a_pfnCImpl The pointer to the C routine.
11080 * @param a0 The first extra argument.
11081 * @param a1 The second extra argument.
11082 * @param a2 The third extra argument.
11083 */
11084#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
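
/*
 * Illustrative sketch only: a decoder function that hands the whole
 * instruction over to a C worker.  The opcode and worker names below are
 * hypothetical placeholders; IEMOP_MNEMONIC and the IEMOP_HLP_* decoding
 * helpers used here are introduced further down in this file.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_hypothetical_insn)
 *      {
 *          IEMOP_MNEMONIC("hypothetical");
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hypothetical_worker);
 *      }
 * @endcode
 */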
11085
11086/**
11087 * Calls a FPU assembly implementation taking one visible argument.
11088 *
11089 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11090 * @param a0 The first extra argument.
11091 */
11092#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11093 do { \
11094 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11095 } while (0)
11096
11097/**
11098 * Calls a FPU assembly implementation taking two visible arguments.
11099 *
11100 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11101 * @param a0 The first extra argument.
11102 * @param a1 The second extra argument.
11103 */
11104#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11105 do { \
11106 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11107 } while (0)
11108
11109/**
11110 * Calls a FPU assembly implementation taking three visible arguments.
11111 *
11112 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11113 * @param a0 The first extra argument.
11114 * @param a1 The second extra argument.
11115 * @param a2 The third extra argument.
11116 */
11117#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11118 do { \
11119 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11120 } while (0)
11121
11122#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11123 do { \
11124 (a_FpuData).FSW = (a_FSW); \
11125 (a_FpuData).r80Result = *(a_pr80Value); \
11126 } while (0)
11127
11128/** Pushes FPU result onto the stack. */
11129#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11130 iemFpuPushResult(pVCpu, &a_FpuData)
11131/** Pushes FPU result onto the stack and sets the FPUDP. */
11132#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11133 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11134
11135/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11136#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11137 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11138
11139/** Stores FPU result in a stack register. */
11140#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11141 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11142/** Stores FPU result in a stack register and pops the stack. */
11143#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11144 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11145/** Stores FPU result in a stack register and sets the FPUDP. */
11146#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11147 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11148/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11149 * stack. */
11150#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11151 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11152
11153/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11154#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11155 iemFpuUpdateOpcodeAndIp(pVCpu)
11156/** Free a stack register (for FFREE and FFREEP). */
11157#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11158 iemFpuStackFree(pVCpu, a_iStReg)
11159/** Increment the FPU stack pointer. */
11160#define IEM_MC_FPU_STACK_INC_TOP() \
11161 iemFpuStackIncTop(pVCpu)
11162/** Decrement the FPU stack pointer. */
11163#define IEM_MC_FPU_STACK_DEC_TOP() \
11164 iemFpuStackDecTop(pVCpu)
11165
11166/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11167#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11168 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11169/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11170#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11171 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11172/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11173#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11174 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11175/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11176#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11177 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11178/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11179 * stack. */
11180#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11181 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11182/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11183#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11184    iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11185
11186/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11187#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11188 iemFpuStackUnderflow(pVCpu, a_iStDst)
11189/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11190 * stack. */
11191#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11192 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11193/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11194 * FPUDS. */
11195#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11196 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11197/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11198 * FPUDS. Pops stack. */
11199#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11200 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11201/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11202 * stack twice. */
11203#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11204 iemFpuStackUnderflowThenPopPop(pVCpu)
11205/** Raises a FPU stack underflow exception for an instruction pushing a result
11206 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11207#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11208 iemFpuStackPushUnderflow(pVCpu)
11209/** Raises a FPU stack underflow exception for an instruction pushing a result
11210 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11211#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11212 iemFpuStackPushUnderflowTwo(pVCpu)
11213
11214/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11215 * FPUIP, FPUCS and FOP. */
11216#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11217 iemFpuStackPushOverflow(pVCpu)
11218/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11219 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11220#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11221 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11222/** Prepares for using the FPU state.
11223 * Ensures that we can use the host FPU in the current context (RC+R0).
11224 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11225#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11226/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11227#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11228/** Actualizes the guest FPU state so it can be accessed and modified. */
11229#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
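
/*
 * Illustrative sketch only: how an ST(0),ST(i) arithmetic instruction ties the
 * FPU helpers together: raise checks, usage preparation, stack probing (the
 * IEM_MC_IF_* helpers are defined further down), the assembly worker call and
 * the result/underflow handling.  iStReg is the decoded register number and
 * iemAImpl_fadd_r80_by_r80 one of the assembly FPU workers.
 *
 * @code
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */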
11230
11231/** Prepares for using the SSE state.
11232 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11233 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11234#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11235/** Actualizes the guest XMM0..15 register state for read-only access. */
11236#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11237/** Actualizes the guest XMM0..15 register state for read-write access. */
11238#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11239
11240/**
11241 * Calls a MMX assembly implementation taking two visible arguments.
11242 *
11243 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11244 * @param a0 The first extra argument.
11245 * @param a1 The second extra argument.
11246 */
11247#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11248 do { \
11249 IEM_MC_PREPARE_FPU_USAGE(); \
11250 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11251 } while (0)
11252
11253/**
11254 * Calls a MMX assembly implementation taking three visible arguments.
11255 *
11256 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11257 * @param a0 The first extra argument.
11258 * @param a1 The second extra argument.
11259 * @param a2 The third extra argument.
11260 */
11261#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11262 do { \
11263 IEM_MC_PREPARE_FPU_USAGE(); \
11264 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11265 } while (0)
11266
11267
11268/**
11269 * Calls a SSE assembly implementation taking two visible arguments.
11270 *
11271 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11272 * @param a0 The first extra argument.
11273 * @param a1 The second extra argument.
11274 */
11275#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11276 do { \
11277 IEM_MC_PREPARE_SSE_USAGE(); \
11278 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11279 } while (0)
11280
11281/**
11282 * Calls a SSE assembly implementation taking three visible arguments.
11283 *
11284 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11285 * @param a0 The first extra argument.
11286 * @param a1 The second extra argument.
11287 * @param a2 The third extra argument.
11288 */
11289#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11290 do { \
11291 IEM_MC_PREPARE_SSE_USAGE(); \
11292 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11293 } while (0)
11294
11295/** @note Not for IOPL or IF testing. */
11296#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11297/** @note Not for IOPL or IF testing. */
11298#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11299/** @note Not for IOPL or IF testing. */
11300#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11301/** @note Not for IOPL or IF testing. */
11302#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11303/** @note Not for IOPL or IF testing. */
11304#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11305 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11306 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11307/** @note Not for IOPL or IF testing. */
11308#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11309 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11310 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11311/** @note Not for IOPL or IF testing. */
11312#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11313 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11314 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11315 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11316/** @note Not for IOPL or IF testing. */
11317#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11318 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11319 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11320 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11321#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11322#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11323#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11324/** @note Not for IOPL or IF testing. */
11325#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11326 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11327 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11328/** @note Not for IOPL or IF testing. */
11329#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11330 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11331 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11332/** @note Not for IOPL or IF testing. */
11333#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11334 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11335 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11336/** @note Not for IOPL or IF testing. */
11337#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11338 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11339 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11340/** @note Not for IOPL or IF testing. */
11341#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11342 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11343 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11344/** @note Not for IOPL or IF testing. */
11345#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11346 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11347 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11348#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11349#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11350
11351#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11352 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11353#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11354 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11355#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11356 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11357#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11358 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11359#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11360 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11361#define IEM_MC_IF_FCW_IM() \
11362 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11363
11364#define IEM_MC_ELSE() } else {
11365#define IEM_MC_ENDIF() } do {} while (0)
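
/*
 * Illustrative sketch only: the IF/ELSE/ENDIF trio shapes conditional bodies,
 * e.g. a 'je rel8' style handler, where i8Imm is the previously decoded
 * displacement byte and IEM_MC_REL_JMP_S8 is defined earlier in this file.
 *
 * @code
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *      IEM_MC_END();
 * @endcode
 */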
11366
11367/** @} */
11368
11369
11370/** @name Opcode Debug Helpers.
11371 * @{
11372 */
11373#ifdef DEBUG
11374# define IEMOP_MNEMONIC(a_szMnemonic) \
11375 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11376 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions))
11377# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
11378 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11379 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pVCpu->iem.s.cInstructions))
11380#else
11381# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
11382# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
11383#endif
11384
11385/** @} */
11386
11387
11388/** @name Opcode Helpers.
11389 * @{
11390 */
11391
11392#ifdef IN_RING3
11393# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11394 do { \
11395 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11396 else \
11397 { \
11398 DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11399 return IEMOP_RAISE_INVALID_OPCODE(); \
11400 } \
11401 } while (0)
11402#else
11403# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11404 do { \
11405 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11406 else return IEMOP_RAISE_INVALID_OPCODE(); \
11407 } while (0)
11408#endif
11409
11410/** The instruction requires a 186 or later. */
11411#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11412# define IEMOP_HLP_MIN_186() do { } while (0)
11413#else
11414# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11415#endif
11416
11417/** The instruction requires a 286 or later. */
11418#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11419# define IEMOP_HLP_MIN_286() do { } while (0)
11420#else
11421# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11422#endif
11423
11424/** The instruction requires a 386 or later. */
11425#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11426# define IEMOP_HLP_MIN_386() do { } while (0)
11427#else
11428# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11429#endif
11430
11431/** The instruction requires a 386 or later if the given expression is true. */
11432#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11433# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11434#else
11435# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11436#endif
11437
11438/** The instruction requires a 486 or later. */
11439#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11440# define IEMOP_HLP_MIN_486() do { } while (0)
11441#else
11442# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11443#endif
11444
11445/** The instruction requires a Pentium (586) or later. */
11446#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
11447# define IEMOP_HLP_MIN_586() do { } while (0)
11448#else
11449# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
11450#endif
11451
11452/** The instruction requires a PentiumPro (686) or later. */
11453#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
11454# define IEMOP_HLP_MIN_686() do { } while (0)
11455#else
11456# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
11457#endif
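
/*
 * Illustrative sketch only: the IEMOP_HLP_MIN_* checks sit right after the
 * mnemonic logging in a decoder function and compile away entirely when the
 * configured target CPU already satisfies the requirement.  The opcode and
 * worker names below are hypothetical placeholders.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_hypothetical_386_insn)
 *      {
 *          IEMOP_MNEMONIC("hyp386");
 *          IEMOP_HLP_MIN_386();    // Raises #UD when targeting a 286 or older CPU.
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hypothetical_386_worker);
 *      }
 * @endcode
 */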
11458
11459
11460/** The instruction raises an \#UD in real and V8086 mode. */
11461#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11462 do \
11463 { \
11464 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11465 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11466 } while (0)
11467
11468/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11469 * 64-bit mode. */
11470#define IEMOP_HLP_NO_64BIT() \
11471 do \
11472 { \
11473 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11474 return IEMOP_RAISE_INVALID_OPCODE(); \
11475 } while (0)
11476
11477/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11478 * 64-bit mode. */
11479#define IEMOP_HLP_ONLY_64BIT() \
11480 do \
11481 { \
11482 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11483 return IEMOP_RAISE_INVALID_OPCODE(); \
11484 } while (0)
11485
11486/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11487#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11488 do \
11489 { \
11490 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11491 iemRecalEffOpSize64Default(pVCpu); \
11492 } while (0)
11493
11494/** The instruction has 64-bit operand size if 64-bit mode. */
11495#define IEMOP_HLP_64BIT_OP_SIZE() \
11496 do \
11497 { \
11498 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11499 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11500 } while (0)
11501
11502/** Only a REX prefix immediately preceding the first opcode byte takes
11503 * effect. This macro helps ensure this as well as log bad guest code. */
11504#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11505 do \
11506 { \
11507 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11508 { \
11509 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11510 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11511 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11512 pVCpu->iem.s.uRexB = 0; \
11513 pVCpu->iem.s.uRexIndex = 0; \
11514 pVCpu->iem.s.uRexReg = 0; \
11515 iemRecalEffOpSize(pVCpu); \
11516 } \
11517 } while (0)
11518
11519/**
11520 * Done decoding.
11521 */
11522#define IEMOP_HLP_DONE_DECODING() \
11523 do \
11524 { \
11525 /*nothing for now, maybe later... */ \
11526 } while (0)
11527
11528/**
11529 * Done decoding, raise \#UD exception if lock prefix present.
11530 */
11531#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11532 do \
11533 { \
11534 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11535 { /* likely */ } \
11536 else \
11537 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11538 } while (0)
11539#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11540 do \
11541 { \
11542 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11543 { /* likely */ } \
11544 else \
11545 { \
11546 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11547 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11548 } \
11549 } while (0)
11550#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11551 do \
11552 { \
11553 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11554 { /* likely */ } \
11555 else \
11556 { \
11557 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11558 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11559 } \
11560 } while (0)
11561
11562/**
11563 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11564 * are present.
11565 */
11566#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11567 do \
11568 { \
11569 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11570 { /* likely */ } \
11571 else \
11572 return IEMOP_RAISE_INVALID_OPCODE(); \
11573 } while (0)
11574
11575
11576/**
11577 * Calculates the effective address of a ModR/M memory operand.
11578 *
11579 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11580 *
11581 * @return Strict VBox status code.
11582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11583 * @param bRm The ModRM byte.
11584 * @param cbImm The size of any immediate following the
11585 * effective address opcode bytes. Important for
11586 * RIP relative addressing.
11587 * @param pGCPtrEff Where to return the effective address.
11588 */
11589IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11590{
11591 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11592 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11593# define SET_SS_DEF() \
11594 do \
11595 { \
11596 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11597 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11598 } while (0)
11599
11600 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11601 {
11602/** @todo Check the effective address size crap! */
11603 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11604 {
11605 uint16_t u16EffAddr;
11606
11607 /* Handle the disp16 form with no registers first. */
11608 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11609 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11610 else
11611 {
11612                /* Get the displacement. */
11613 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11614 {
11615 case 0: u16EffAddr = 0; break;
11616 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11617 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11618 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11619 }
11620
11621 /* Add the base and index registers to the disp. */
11622 switch (bRm & X86_MODRM_RM_MASK)
11623 {
11624 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11625 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11626 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11627 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11628 case 4: u16EffAddr += pCtx->si; break;
11629 case 5: u16EffAddr += pCtx->di; break;
11630 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11631 case 7: u16EffAddr += pCtx->bx; break;
11632 }
11633 }
11634
11635 *pGCPtrEff = u16EffAddr;
11636 }
11637 else
11638 {
11639 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11640 uint32_t u32EffAddr;
11641
11642 /* Handle the disp32 form with no registers first. */
11643 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11644 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11645 else
11646 {
11647 /* Get the register (or SIB) value. */
11648 switch ((bRm & X86_MODRM_RM_MASK))
11649 {
11650 case 0: u32EffAddr = pCtx->eax; break;
11651 case 1: u32EffAddr = pCtx->ecx; break;
11652 case 2: u32EffAddr = pCtx->edx; break;
11653 case 3: u32EffAddr = pCtx->ebx; break;
11654 case 4: /* SIB */
11655 {
11656 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11657
11658 /* Get the index and scale it. */
11659 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11660 {
11661 case 0: u32EffAddr = pCtx->eax; break;
11662 case 1: u32EffAddr = pCtx->ecx; break;
11663 case 2: u32EffAddr = pCtx->edx; break;
11664 case 3: u32EffAddr = pCtx->ebx; break;
11665 case 4: u32EffAddr = 0; /*none */ break;
11666 case 5: u32EffAddr = pCtx->ebp; break;
11667 case 6: u32EffAddr = pCtx->esi; break;
11668 case 7: u32EffAddr = pCtx->edi; break;
11669 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11670 }
11671 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11672
11673 /* add base */
11674 switch (bSib & X86_SIB_BASE_MASK)
11675 {
11676 case 0: u32EffAddr += pCtx->eax; break;
11677 case 1: u32EffAddr += pCtx->ecx; break;
11678 case 2: u32EffAddr += pCtx->edx; break;
11679 case 3: u32EffAddr += pCtx->ebx; break;
11680 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11681 case 5:
11682 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11683 {
11684 u32EffAddr += pCtx->ebp;
11685 SET_SS_DEF();
11686 }
11687 else
11688 {
11689 uint32_t u32Disp;
11690 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11691 u32EffAddr += u32Disp;
11692 }
11693 break;
11694 case 6: u32EffAddr += pCtx->esi; break;
11695 case 7: u32EffAddr += pCtx->edi; break;
11696 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11697 }
11698 break;
11699 }
11700 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11701 case 6: u32EffAddr = pCtx->esi; break;
11702 case 7: u32EffAddr = pCtx->edi; break;
11703 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11704 }
11705
11706 /* Get and add the displacement. */
11707 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11708 {
11709 case 0:
11710 break;
11711 case 1:
11712 {
11713 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11714 u32EffAddr += i8Disp;
11715 break;
11716 }
11717 case 2:
11718 {
11719 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11720 u32EffAddr += u32Disp;
11721 break;
11722 }
11723 default:
11724 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11725 }
11726
11727 }
11728 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11729 *pGCPtrEff = u32EffAddr;
11730 else
11731 {
11732 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11733 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11734 }
11735 }
11736 }
11737 else
11738 {
11739 uint64_t u64EffAddr;
11740
11741 /* Handle the rip+disp32 form with no registers first. */
11742 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11743 {
11744 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11745 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11746 }
11747 else
11748 {
11749 /* Get the register (or SIB) value. */
11750 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11751 {
11752 case 0: u64EffAddr = pCtx->rax; break;
11753 case 1: u64EffAddr = pCtx->rcx; break;
11754 case 2: u64EffAddr = pCtx->rdx; break;
11755 case 3: u64EffAddr = pCtx->rbx; break;
11756 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11757 case 6: u64EffAddr = pCtx->rsi; break;
11758 case 7: u64EffAddr = pCtx->rdi; break;
11759 case 8: u64EffAddr = pCtx->r8; break;
11760 case 9: u64EffAddr = pCtx->r9; break;
11761 case 10: u64EffAddr = pCtx->r10; break;
11762 case 11: u64EffAddr = pCtx->r11; break;
11763 case 13: u64EffAddr = pCtx->r13; break;
11764 case 14: u64EffAddr = pCtx->r14; break;
11765 case 15: u64EffAddr = pCtx->r15; break;
11766 /* SIB */
11767 case 4:
11768 case 12:
11769 {
11770 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11771
11772 /* Get the index and scale it. */
11773 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11774 {
11775 case 0: u64EffAddr = pCtx->rax; break;
11776 case 1: u64EffAddr = pCtx->rcx; break;
11777 case 2: u64EffAddr = pCtx->rdx; break;
11778 case 3: u64EffAddr = pCtx->rbx; break;
11779 case 4: u64EffAddr = 0; /*none */ break;
11780 case 5: u64EffAddr = pCtx->rbp; break;
11781 case 6: u64EffAddr = pCtx->rsi; break;
11782 case 7: u64EffAddr = pCtx->rdi; break;
11783 case 8: u64EffAddr = pCtx->r8; break;
11784 case 9: u64EffAddr = pCtx->r9; break;
11785 case 10: u64EffAddr = pCtx->r10; break;
11786 case 11: u64EffAddr = pCtx->r11; break;
11787 case 12: u64EffAddr = pCtx->r12; break;
11788 case 13: u64EffAddr = pCtx->r13; break;
11789 case 14: u64EffAddr = pCtx->r14; break;
11790 case 15: u64EffAddr = pCtx->r15; break;
11791 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11792 }
11793 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11794
11795 /* add base */
11796 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11797 {
11798 case 0: u64EffAddr += pCtx->rax; break;
11799 case 1: u64EffAddr += pCtx->rcx; break;
11800 case 2: u64EffAddr += pCtx->rdx; break;
11801 case 3: u64EffAddr += pCtx->rbx; break;
11802 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11803 case 6: u64EffAddr += pCtx->rsi; break;
11804 case 7: u64EffAddr += pCtx->rdi; break;
11805 case 8: u64EffAddr += pCtx->r8; break;
11806 case 9: u64EffAddr += pCtx->r9; break;
11807 case 10: u64EffAddr += pCtx->r10; break;
11808 case 11: u64EffAddr += pCtx->r11; break;
11809 case 12: u64EffAddr += pCtx->r12; break;
11810 case 14: u64EffAddr += pCtx->r14; break;
11811 case 15: u64EffAddr += pCtx->r15; break;
11812 /* complicated encodings */
11813 case 5:
11814 case 13:
11815 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11816 {
11817 if (!pVCpu->iem.s.uRexB)
11818 {
11819 u64EffAddr += pCtx->rbp;
11820 SET_SS_DEF();
11821 }
11822 else
11823 u64EffAddr += pCtx->r13;
11824 }
11825 else
11826 {
11827 uint32_t u32Disp;
11828 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11829 u64EffAddr += (int32_t)u32Disp;
11830 }
11831 break;
11832 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11833 }
11834 break;
11835 }
11836 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11837 }
11838
11839 /* Get and add the displacement. */
11840 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11841 {
11842 case 0:
11843 break;
11844 case 1:
11845 {
11846 int8_t i8Disp;
11847 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11848 u64EffAddr += i8Disp;
11849 break;
11850 }
11851 case 2:
11852 {
11853 uint32_t u32Disp;
11854 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11855 u64EffAddr += (int32_t)u32Disp;
11856 break;
11857 }
11858 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11859 }
11860
11861 }
11862
11863 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11864 *pGCPtrEff = u64EffAddr;
11865 else
11866 {
11867 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11868 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11869 }
11870 }
11871
11872 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11873 return VINF_SUCCESS;
11874}
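
/*
 * Worked example (illustrative): with 32-bit addressing, bRm=0x44 decodes as
 * mod=1, reg=0, rm=4, i.e. a SIB byte followed by a disp8.  For bSib=0x98
 * (scale=2, index=EBX, base=EAX) and disp8=0x10 the code above computes
 *      GCPtrEff = eax + (ebx << 2) + 0x10
 * and leaves DS as the default segment, since neither EBP nor ESP is involved
 * as the base.
 */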
11875
11876
11877/**
11878 * Calculates the effective address of a ModR/M memory operand.
11879 *
11880 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11881 *
11882 * @return Strict VBox status code.
11883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11884 * @param bRm The ModRM byte.
11885 * @param cbImm The size of any immediate following the
11886 * effective address opcode bytes. Important for
11887 * RIP relative addressing.
11888 * @param pGCPtrEff Where to return the effective address.
11889 * @param offRsp RSP displacement.
11890 */
11891IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11892{
11893    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11894 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11895# define SET_SS_DEF() \
11896 do \
11897 { \
11898 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11899 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11900 } while (0)
11901
11902 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11903 {
11904/** @todo Check the effective address size crap! */
11905 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11906 {
11907 uint16_t u16EffAddr;
11908
11909 /* Handle the disp16 form with no registers first. */
11910 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11911 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11912 else
11913 {
11914                /* Get the displacement. */
11915 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11916 {
11917 case 0: u16EffAddr = 0; break;
11918 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11919 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11920 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11921 }
11922
11923 /* Add the base and index registers to the disp. */
11924 switch (bRm & X86_MODRM_RM_MASK)
11925 {
11926 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11927 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11928 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11929 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11930 case 4: u16EffAddr += pCtx->si; break;
11931 case 5: u16EffAddr += pCtx->di; break;
11932 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11933 case 7: u16EffAddr += pCtx->bx; break;
11934 }
11935 }
11936
11937 *pGCPtrEff = u16EffAddr;
11938 }
11939 else
11940 {
11941 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11942 uint32_t u32EffAddr;
11943
11944 /* Handle the disp32 form with no registers first. */
11945 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11946 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11947 else
11948 {
11949 /* Get the register (or SIB) value. */
11950 switch ((bRm & X86_MODRM_RM_MASK))
11951 {
11952 case 0: u32EffAddr = pCtx->eax; break;
11953 case 1: u32EffAddr = pCtx->ecx; break;
11954 case 2: u32EffAddr = pCtx->edx; break;
11955 case 3: u32EffAddr = pCtx->ebx; break;
11956 case 4: /* SIB */
11957 {
11958 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11959
11960 /* Get the index and scale it. */
11961 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11962 {
11963 case 0: u32EffAddr = pCtx->eax; break;
11964 case 1: u32EffAddr = pCtx->ecx; break;
11965 case 2: u32EffAddr = pCtx->edx; break;
11966 case 3: u32EffAddr = pCtx->ebx; break;
11967 case 4: u32EffAddr = 0; /*none */ break;
11968 case 5: u32EffAddr = pCtx->ebp; break;
11969 case 6: u32EffAddr = pCtx->esi; break;
11970 case 7: u32EffAddr = pCtx->edi; break;
11971 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11972 }
11973 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11974
11975 /* add base */
11976 switch (bSib & X86_SIB_BASE_MASK)
11977 {
11978 case 0: u32EffAddr += pCtx->eax; break;
11979 case 1: u32EffAddr += pCtx->ecx; break;
11980 case 2: u32EffAddr += pCtx->edx; break;
11981 case 3: u32EffAddr += pCtx->ebx; break;
11982 case 4:
11983 u32EffAddr += pCtx->esp + offRsp;
11984 SET_SS_DEF();
11985 break;
11986 case 5:
11987 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11988 {
11989 u32EffAddr += pCtx->ebp;
11990 SET_SS_DEF();
11991 }
11992 else
11993 {
11994 uint32_t u32Disp;
11995 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11996 u32EffAddr += u32Disp;
11997 }
11998 break;
11999 case 6: u32EffAddr += pCtx->esi; break;
12000 case 7: u32EffAddr += pCtx->edi; break;
12001 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12002 }
12003 break;
12004 }
12005 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12006 case 6: u32EffAddr = pCtx->esi; break;
12007 case 7: u32EffAddr = pCtx->edi; break;
12008 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12009 }
12010
12011 /* Get and add the displacement. */
12012 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12013 {
12014 case 0:
12015 break;
12016 case 1:
12017 {
12018 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12019 u32EffAddr += i8Disp;
12020 break;
12021 }
12022 case 2:
12023 {
12024 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12025 u32EffAddr += u32Disp;
12026 break;
12027 }
12028 default:
12029 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12030 }
12031
12032 }
12033 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12034 *pGCPtrEff = u32EffAddr;
12035 else
12036 {
12037 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12038 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12039 }
12040 }
12041 }
12042 else
12043 {
12044 uint64_t u64EffAddr;
12045
12046 /* Handle the rip+disp32 form with no registers first. */
12047 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12048 {
12049 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12050 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12051 }
12052 else
12053 {
12054 /* Get the register (or SIB) value. */
12055 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12056 {
12057 case 0: u64EffAddr = pCtx->rax; break;
12058 case 1: u64EffAddr = pCtx->rcx; break;
12059 case 2: u64EffAddr = pCtx->rdx; break;
12060 case 3: u64EffAddr = pCtx->rbx; break;
12061 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12062 case 6: u64EffAddr = pCtx->rsi; break;
12063 case 7: u64EffAddr = pCtx->rdi; break;
12064 case 8: u64EffAddr = pCtx->r8; break;
12065 case 9: u64EffAddr = pCtx->r9; break;
12066 case 10: u64EffAddr = pCtx->r10; break;
12067 case 11: u64EffAddr = pCtx->r11; break;
12068 case 13: u64EffAddr = pCtx->r13; break;
12069 case 14: u64EffAddr = pCtx->r14; break;
12070 case 15: u64EffAddr = pCtx->r15; break;
12071 /* SIB */
12072 case 4:
12073 case 12:
12074 {
12075 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12076
12077 /* Get the index and scale it. */
12078 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12079 {
12080 case 0: u64EffAddr = pCtx->rax; break;
12081 case 1: u64EffAddr = pCtx->rcx; break;
12082 case 2: u64EffAddr = pCtx->rdx; break;
12083 case 3: u64EffAddr = pCtx->rbx; break;
12084 case 4: u64EffAddr = 0; /*none */ break;
12085 case 5: u64EffAddr = pCtx->rbp; break;
12086 case 6: u64EffAddr = pCtx->rsi; break;
12087 case 7: u64EffAddr = pCtx->rdi; break;
12088 case 8: u64EffAddr = pCtx->r8; break;
12089 case 9: u64EffAddr = pCtx->r9; break;
12090 case 10: u64EffAddr = pCtx->r10; break;
12091 case 11: u64EffAddr = pCtx->r11; break;
12092 case 12: u64EffAddr = pCtx->r12; break;
12093 case 13: u64EffAddr = pCtx->r13; break;
12094 case 14: u64EffAddr = pCtx->r14; break;
12095 case 15: u64EffAddr = pCtx->r15; break;
12096 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12097 }
12098 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12099
12100 /* add base */
12101 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12102 {
12103 case 0: u64EffAddr += pCtx->rax; break;
12104 case 1: u64EffAddr += pCtx->rcx; break;
12105 case 2: u64EffAddr += pCtx->rdx; break;
12106 case 3: u64EffAddr += pCtx->rbx; break;
12107 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12108 case 6: u64EffAddr += pCtx->rsi; break;
12109 case 7: u64EffAddr += pCtx->rdi; break;
12110 case 8: u64EffAddr += pCtx->r8; break;
12111 case 9: u64EffAddr += pCtx->r9; break;
12112 case 10: u64EffAddr += pCtx->r10; break;
12113 case 11: u64EffAddr += pCtx->r11; break;
12114 case 12: u64EffAddr += pCtx->r12; break;
12115 case 14: u64EffAddr += pCtx->r14; break;
12116 case 15: u64EffAddr += pCtx->r15; break;
12117 /* complicated encodings */
12118 case 5:
12119 case 13:
12120 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12121 {
12122 if (!pVCpu->iem.s.uRexB)
12123 {
12124 u64EffAddr += pCtx->rbp;
12125 SET_SS_DEF();
12126 }
12127 else
12128 u64EffAddr += pCtx->r13;
12129 }
12130 else
12131 {
12132 uint32_t u32Disp;
12133 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12134 u64EffAddr += (int32_t)u32Disp;
12135 }
12136 break;
12137 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12138 }
12139 break;
12140 }
12141 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12142 }
12143
12144 /* Get and add the displacement. */
12145 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12146 {
12147 case 0:
12148 break;
12149 case 1:
12150 {
12151 int8_t i8Disp;
12152 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12153 u64EffAddr += i8Disp;
12154 break;
12155 }
12156 case 2:
12157 {
12158 uint32_t u32Disp;
12159 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12160 u64EffAddr += (int32_t)u32Disp;
12161 break;
12162 }
12163 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12164 }
12165
12166 }
12167
12168 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12169 *pGCPtrEff = u64EffAddr;
12170 else
12171 {
12172 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12173 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12174 }
12175 }
12176
12177 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12178 return VINF_SUCCESS;
12179}
12180
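
/*
 * The 32-bit ModR/M path above reduces to the classic base + (index << scale)
 * + displacement arithmetic.  A minimal, standalone sketch of just that math,
 * using hypothetical register values instead of the live CPUMCTX (the helper
 * name is illustrative, not an IEM API):
 */
#if 0
static uint32_t iemExampleCalcSibEa(uint32_t uBase, uint32_t uIndex, uint8_t cShift, int32_t iDisp)
{
    /* e.g. [ebx + esi*4 - 8]: uBase=ebx, uIndex=esi, cShift=2, iDisp=-8. */
    uint32_t uEffAddr = uIndex << cShift;   /* scale the index: shift 0..3 gives *1, *2, *4, *8 */
    uEffAddr += uBase;                      /* add the base register */
    uEffAddr += (uint32_t)iDisp;            /* add the signed displacement, wrapping modulo 2^32 */
    return uEffAddr;
}
#endif
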
12181
12182#ifdef IEM_WITH_SETJMP
12183/**
12184 * Calculates the effective address of a ModR/M memory operand.
12185 *
12186 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12187 *
12188 * May longjmp on internal error.
12189 *
12190 * @return The effective address.
12191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12192 * @param bRm The ModRM byte.
12193 * @param cbImm The size of any immediate following the
12194 * effective address opcode bytes. Important for
12195 * RIP relative addressing.
12196 */
12197IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12198{
12199 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12200 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12201# define SET_SS_DEF() \
12202 do \
12203 { \
12204 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12205 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12206 } while (0)
12207
12208 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12209 {
12210/** @todo Check the effective address size crap! */
12211 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12212 {
12213 uint16_t u16EffAddr;
12214
12215 /* Handle the disp16 form with no registers first. */
12216 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12217 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12218 else
12219 {
12220                /* Get the displacement. */
12221 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12222 {
12223 case 0: u16EffAddr = 0; break;
12224 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12225 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12226 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12227 }
12228
12229 /* Add the base and index registers to the disp. */
12230 switch (bRm & X86_MODRM_RM_MASK)
12231 {
12232 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12233 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12234 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12235 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12236 case 4: u16EffAddr += pCtx->si; break;
12237 case 5: u16EffAddr += pCtx->di; break;
12238 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12239 case 7: u16EffAddr += pCtx->bx; break;
12240 }
12241 }
12242
12243 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12244 return u16EffAddr;
12245 }
12246
12247 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12248 uint32_t u32EffAddr;
12249
12250 /* Handle the disp32 form with no registers first. */
12251 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12252 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12253 else
12254 {
12255 /* Get the register (or SIB) value. */
12256 switch ((bRm & X86_MODRM_RM_MASK))
12257 {
12258 case 0: u32EffAddr = pCtx->eax; break;
12259 case 1: u32EffAddr = pCtx->ecx; break;
12260 case 2: u32EffAddr = pCtx->edx; break;
12261 case 3: u32EffAddr = pCtx->ebx; break;
12262 case 4: /* SIB */
12263 {
12264 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12265
12266 /* Get the index and scale it. */
12267 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12268 {
12269 case 0: u32EffAddr = pCtx->eax; break;
12270 case 1: u32EffAddr = pCtx->ecx; break;
12271 case 2: u32EffAddr = pCtx->edx; break;
12272 case 3: u32EffAddr = pCtx->ebx; break;
12273 case 4: u32EffAddr = 0; /*none */ break;
12274 case 5: u32EffAddr = pCtx->ebp; break;
12275 case 6: u32EffAddr = pCtx->esi; break;
12276 case 7: u32EffAddr = pCtx->edi; break;
12277 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12278 }
12279 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12280
12281 /* add base */
12282 switch (bSib & X86_SIB_BASE_MASK)
12283 {
12284 case 0: u32EffAddr += pCtx->eax; break;
12285 case 1: u32EffAddr += pCtx->ecx; break;
12286 case 2: u32EffAddr += pCtx->edx; break;
12287 case 3: u32EffAddr += pCtx->ebx; break;
12288 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12289 case 5:
12290 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12291 {
12292 u32EffAddr += pCtx->ebp;
12293 SET_SS_DEF();
12294 }
12295 else
12296 {
12297 uint32_t u32Disp;
12298 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12299 u32EffAddr += u32Disp;
12300 }
12301 break;
12302 case 6: u32EffAddr += pCtx->esi; break;
12303 case 7: u32EffAddr += pCtx->edi; break;
12304 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12305 }
12306 break;
12307 }
12308 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12309 case 6: u32EffAddr = pCtx->esi; break;
12310 case 7: u32EffAddr = pCtx->edi; break;
12311 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12312 }
12313
12314 /* Get and add the displacement. */
12315 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12316 {
12317 case 0:
12318 break;
12319 case 1:
12320 {
12321 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12322 u32EffAddr += i8Disp;
12323 break;
12324 }
12325 case 2:
12326 {
12327 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12328 u32EffAddr += u32Disp;
12329 break;
12330 }
12331 default:
12332 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12333 }
12334 }
12335
12336 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12337 {
12338 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12339 return u32EffAddr;
12340 }
12341 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12342 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12343 return u32EffAddr & UINT16_MAX;
12344 }
12345
12346 uint64_t u64EffAddr;
12347
12348 /* Handle the rip+disp32 form with no registers first. */
12349 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12350 {
12351 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12352 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12353 }
12354 else
12355 {
12356 /* Get the register (or SIB) value. */
12357 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12358 {
12359 case 0: u64EffAddr = pCtx->rax; break;
12360 case 1: u64EffAddr = pCtx->rcx; break;
12361 case 2: u64EffAddr = pCtx->rdx; break;
12362 case 3: u64EffAddr = pCtx->rbx; break;
12363 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12364 case 6: u64EffAddr = pCtx->rsi; break;
12365 case 7: u64EffAddr = pCtx->rdi; break;
12366 case 8: u64EffAddr = pCtx->r8; break;
12367 case 9: u64EffAddr = pCtx->r9; break;
12368 case 10: u64EffAddr = pCtx->r10; break;
12369 case 11: u64EffAddr = pCtx->r11; break;
12370 case 13: u64EffAddr = pCtx->r13; break;
12371 case 14: u64EffAddr = pCtx->r14; break;
12372 case 15: u64EffAddr = pCtx->r15; break;
12373 /* SIB */
12374 case 4:
12375 case 12:
12376 {
12377 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12378
12379 /* Get the index and scale it. */
12380 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12381 {
12382 case 0: u64EffAddr = pCtx->rax; break;
12383 case 1: u64EffAddr = pCtx->rcx; break;
12384 case 2: u64EffAddr = pCtx->rdx; break;
12385 case 3: u64EffAddr = pCtx->rbx; break;
12386 case 4: u64EffAddr = 0; /*none */ break;
12387 case 5: u64EffAddr = pCtx->rbp; break;
12388 case 6: u64EffAddr = pCtx->rsi; break;
12389 case 7: u64EffAddr = pCtx->rdi; break;
12390 case 8: u64EffAddr = pCtx->r8; break;
12391 case 9: u64EffAddr = pCtx->r9; break;
12392 case 10: u64EffAddr = pCtx->r10; break;
12393 case 11: u64EffAddr = pCtx->r11; break;
12394 case 12: u64EffAddr = pCtx->r12; break;
12395 case 13: u64EffAddr = pCtx->r13; break;
12396 case 14: u64EffAddr = pCtx->r14; break;
12397 case 15: u64EffAddr = pCtx->r15; break;
12398 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12399 }
12400 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12401
12402 /* add base */
12403 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12404 {
12405 case 0: u64EffAddr += pCtx->rax; break;
12406 case 1: u64EffAddr += pCtx->rcx; break;
12407 case 2: u64EffAddr += pCtx->rdx; break;
12408 case 3: u64EffAddr += pCtx->rbx; break;
12409 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12410 case 6: u64EffAddr += pCtx->rsi; break;
12411 case 7: u64EffAddr += pCtx->rdi; break;
12412 case 8: u64EffAddr += pCtx->r8; break;
12413 case 9: u64EffAddr += pCtx->r9; break;
12414 case 10: u64EffAddr += pCtx->r10; break;
12415 case 11: u64EffAddr += pCtx->r11; break;
12416 case 12: u64EffAddr += pCtx->r12; break;
12417 case 14: u64EffAddr += pCtx->r14; break;
12418 case 15: u64EffAddr += pCtx->r15; break;
12419 /* complicated encodings */
12420 case 5:
12421 case 13:
12422 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12423 {
12424 if (!pVCpu->iem.s.uRexB)
12425 {
12426 u64EffAddr += pCtx->rbp;
12427 SET_SS_DEF();
12428 }
12429 else
12430 u64EffAddr += pCtx->r13;
12431 }
12432 else
12433 {
12434 uint32_t u32Disp;
12435 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12436 u64EffAddr += (int32_t)u32Disp;
12437 }
12438 break;
12439 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12440 }
12441 break;
12442 }
12443 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12444 }
12445
12446 /* Get and add the displacement. */
12447 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12448 {
12449 case 0:
12450 break;
12451 case 1:
12452 {
12453 int8_t i8Disp;
12454 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12455 u64EffAddr += i8Disp;
12456 break;
12457 }
12458 case 2:
12459 {
12460 uint32_t u32Disp;
12461 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12462 u64EffAddr += (int32_t)u32Disp;
12463 break;
12464 }
12465 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12466 }
12467
12468 }
12469
12470 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12471 {
12472 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12473 return u64EffAddr;
12474 }
12475 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12476 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12477 return u64EffAddr & UINT32_MAX;
12478}
12479#endif /* IEM_WITH_SETJMP */
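
/*
 * Both helpers above treat the mod=0, r/m=5 encoding in 64-bit mode as
 * RIP-relative: the sign-extended disp32 is added to the address of the
 * *next* instruction, i.e. the bytes decoded so far plus any trailing
 * immediate (hence the cbImm parameter).  A condensed sketch with
 * hypothetical names:
 */
#if 0
static uint64_t iemExampleRipRelEa(uint64_t uRip, uint8_t cbInstrSoFar, uint8_t cbImm, int32_t i32Disp)
{
    /* Effective address = RIP of the following instruction + sign-extended disp32. */
    return uRip + cbInstrSoFar + cbImm + (uint64_t)(int64_t)i32Disp;
}
#endif
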
12480
12481
12482/** @} */
12483
12484
12485
12486/*
12487 * Include the instructions
12488 */
12489#include "IEMAllInstructions.cpp.h"
12490
12491
12492
12493
12494#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12495
12496/**
12497 * Sets up execution verification mode.
12498 */
12499IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12500{
12501
12502 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12503
12504 /*
12505 * Always note down the address of the current instruction.
12506 */
12507 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12508 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12509
12510 /*
12511 * Enable verification and/or logging.
12512 */
12513    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12514 if ( fNewNoRem
12515 && ( 0
12516#if 0 /* auto enable on first paged protected mode interrupt */
12517 || ( pOrgCtx->eflags.Bits.u1IF
12518 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12519 && TRPMHasTrap(pVCpu)
12520 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12521#endif
12522#if 0
12523               || (   pOrgCtx->cs.Sel == 0x10
12524                   && (   pOrgCtx->rip == 0x90119e3e
12525                       || pOrgCtx->rip == 0x901d9810))
12526#endif
12527#if 0 /* Auto enable DSL - FPU stuff. */
12528               || (   pOrgCtx->cs.Sel == 0x10
12529 && (// pOrgCtx->rip == 0xc02ec07f
12530 //|| pOrgCtx->rip == 0xc02ec082
12531 //|| pOrgCtx->rip == 0xc02ec0c9
12532 0
12533 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12534#endif
12535#if 0 /* Auto enable DSL - fstp st0 stuff. */
12536               || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12537#endif
12538#if 0
12539 || pOrgCtx->rip == 0x9022bb3a
12540#endif
12541#if 0
12542 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12543#endif
12544#if 0
12545 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12546 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12547#endif
12548#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
12549 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12550 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12551 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12552#endif
12553#if 0 /* NT4SP1 - xadd early boot. */
12554 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12555#endif
12556#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12557 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12558#endif
12559#if 0 /* NT4SP1 - cmpxchg (AMD). */
12560 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12561#endif
12562#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12563 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12564#endif
12565#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12566 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12567
12568#endif
12569#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12570 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12571
12572#endif
12573#if 0 /* NT4SP1 - frstor [ecx] */
12574 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12575#endif
12576#if 0 /* xxxxxx - All long mode code. */
12577 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12578#endif
12579#if 0 /* rep movsq linux 3.7 64-bit boot. */
12580 || (pOrgCtx->rip == 0x0000000000100241)
12581#endif
12582#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12583 || (pOrgCtx->rip == 0x000000000215e240)
12584#endif
12585#if 0 /* DOS's size-overridden iret to v8086. */
12586 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12587#endif
12588 )
12589 )
12590 {
12591 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12592 RTLogFlags(NULL, "enabled");
12593 fNewNoRem = false;
12594 }
12595 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12596 {
12597 pVCpu->iem.s.fNoRem = fNewNoRem;
12598 if (!fNewNoRem)
12599 {
12600 LogAlways(("Enabling verification mode!\n"));
12601 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12602 }
12603 else
12604 LogAlways(("Disabling verification mode!\n"));
12605 }
12606
12607 /*
12608 * Switch state.
12609 */
12610 if (IEM_VERIFICATION_ENABLED(pVCpu))
12611 {
12612 static CPUMCTX s_DebugCtx; /* Ugly! */
12613
12614 s_DebugCtx = *pOrgCtx;
12615 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12616 }
12617
12618 /*
12619 * See if there is an interrupt pending in TRPM and inject it if we can.
12620 */
12621 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12622 if ( pOrgCtx->eflags.Bits.u1IF
12623 && TRPMHasTrap(pVCpu)
12624 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12625 {
12626 uint8_t u8TrapNo;
12627 TRPMEVENT enmType;
12628 RTGCUINT uErrCode;
12629 RTGCPTR uCr2;
12630 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12631 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12632 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12633 TRPMResetTrap(pVCpu);
12634 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12635 }
12636
12637 /*
12638 * Reset the counters.
12639 */
12640 pVCpu->iem.s.cIOReads = 0;
12641 pVCpu->iem.s.cIOWrites = 0;
12642 pVCpu->iem.s.fIgnoreRaxRdx = false;
12643 pVCpu->iem.s.fOverlappingMovs = false;
12644 pVCpu->iem.s.fProblematicMemory = false;
12645 pVCpu->iem.s.fUndefinedEFlags = 0;
12646
12647 if (IEM_VERIFICATION_ENABLED(pVCpu))
12648 {
12649 /*
12650 * Free all verification records.
12651 */
12652 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12653 pVCpu->iem.s.pIemEvtRecHead = NULL;
12654 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12655 do
12656 {
12657 while (pEvtRec)
12658 {
12659 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12660 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12661 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12662 pEvtRec = pNext;
12663 }
12664 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12665 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12666 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12667 } while (pEvtRec);
12668 }
12669}
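
/*
 * The record clean-up above is a plain singly linked list splice: every node
 * still on the two event lists is pushed onto the free list, one at a time.
 * The pattern in isolation, with a hypothetical node type rather than
 * IEMVERIFYEVTREC:
 */
#if 0
typedef struct EXAMPLENODE { struct EXAMPLENODE *pNext; } EXAMPLENODE;

static void exampleRecycleList(EXAMPLENODE **ppHead, EXAMPLENODE **ppFree)
{
    EXAMPLENODE *pCur = *ppHead;
    *ppHead = NULL;                     /* detach the whole list */
    while (pCur)
    {
        EXAMPLENODE *pNext = pCur->pNext;
        pCur->pNext = *ppFree;          /* push the node onto the free list */
        *ppFree     = pCur;
        pCur = pNext;
    }
}
#endif
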
12670
12671
12672/**
12673 * Allocate an event record.
12674 * @returns Pointer to a record.
12675 */
12676IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12677{
12678 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12679 return NULL;
12680
12681 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12682 if (pEvtRec)
12683 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12684 else
12685 {
12686 if (!pVCpu->iem.s.ppIemEvtRecNext)
12687 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12688
12689 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12690 if (!pEvtRec)
12691 return NULL;
12692 }
12693 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12694 pEvtRec->pNext = NULL;
12695 return pEvtRec;
12696}
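
/*
 * Allocation is the mirror image of the recycling done at setup: pop the free
 * list if it has a node, otherwise fall back to the heap.  Sketched
 * generically with hypothetical names (malloc stands in for MMR3HeapAlloc and
 * assumes <stdlib.h>):
 */
#if 0
typedef struct EXAMPLEREC { struct EXAMPLEREC *pNext; } EXAMPLEREC;

static EXAMPLEREC *exampleAllocRecord(EXAMPLEREC **ppFree)
{
    EXAMPLEREC *pRec = *ppFree;
    if (pRec)
        *ppFree = pRec->pNext;          /* reuse a recycled record */
    else
        pRec = (EXAMPLEREC *)malloc(sizeof(*pRec));
    if (pRec)
        pRec->pNext = NULL;
    return pRec;
}
#endif
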
12697
12698
12699/**
12700 * IOMMMIORead notification.
12701 */
12702VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12703{
12704 PVMCPU pVCpu = VMMGetCpu(pVM);
12705 if (!pVCpu)
12706 return;
12707 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12708 if (!pEvtRec)
12709 return;
12710 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12711 pEvtRec->u.RamRead.GCPhys = GCPhys;
12712 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12713 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12714 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12715}
12716
12717
12718/**
12719 * IOMMMIOWrite notification.
12720 */
12721VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12722{
12723 PVMCPU pVCpu = VMMGetCpu(pVM);
12724 if (!pVCpu)
12725 return;
12726 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12727 if (!pEvtRec)
12728 return;
12729 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12730 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12731 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12732 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12733 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12734 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12735 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12736 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12737 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12738}
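
/*
 * The RT_BYTE1..RT_BYTE4 calls above serialise the written value into the
 * record in little-endian byte order.  The equivalent shift-and-mask form,
 * shown with a hypothetical helper name:
 */
#if 0
static void exampleStoreU32LE(uint8_t ab[4], uint32_t u32Value)
{
    ab[0] = (uint8_t)(u32Value      );  /* least significant byte first */
    ab[1] = (uint8_t)(u32Value >>  8);
    ab[2] = (uint8_t)(u32Value >> 16);
    ab[3] = (uint8_t)(u32Value >> 24);
}
#endif
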
12739
12740
12741/**
12742 * IOMIOPortRead notification.
12743 */
12744VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12745{
12746 PVMCPU pVCpu = VMMGetCpu(pVM);
12747 if (!pVCpu)
12748 return;
12749 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12750 if (!pEvtRec)
12751 return;
12752 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12753 pEvtRec->u.IOPortRead.Port = Port;
12754 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12755 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12756 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12757}
12758
12759/**
12760 * IOMIOPortWrite notification.
12761 */
12762VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12763{
12764 PVMCPU pVCpu = VMMGetCpu(pVM);
12765 if (!pVCpu)
12766 return;
12767 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12768 if (!pEvtRec)
12769 return;
12770 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12771 pEvtRec->u.IOPortWrite.Port = Port;
12772 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12773 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12774 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12775 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12776}
12777
12778
12779VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12780{
12781 PVMCPU pVCpu = VMMGetCpu(pVM);
12782 if (!pVCpu)
12783 return;
12784 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12785 if (!pEvtRec)
12786 return;
12787 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12788 pEvtRec->u.IOPortStrRead.Port = Port;
12789 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12790 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12791 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12792 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12793}
12794
12795
12796VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12797{
12798 PVMCPU pVCpu = VMMGetCpu(pVM);
12799 if (!pVCpu)
12800 return;
12801 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12802 if (!pEvtRec)
12803 return;
12804 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12805 pEvtRec->u.IOPortStrWrite.Port = Port;
12806 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12807 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12808 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12809 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12810}
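
/*
 * All the notification helpers above link their record into the 'other' event
 * list through a pointer-to-next-pointer cursor; inserting at that cursor is
 * a two step splice.  The generic form, with a hypothetical node type:
 */
#if 0
typedef struct EXAMPLEEVT { struct EXAMPLEEVT *pNext; } EXAMPLEEVT;

static void exampleInsertAtCursor(EXAMPLEEVT **ppCursor, EXAMPLEEVT *pNew)
{
    pNew->pNext = *ppCursor;            /* whatever currently hangs off the cursor ... */
    *ppCursor   = pNew;                 /* ... becomes the successor of the new node */
}
#endif
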
12811
12812
12813/**
12814 * Fakes and records an I/O port read.
12815 *
12816 * @returns VINF_SUCCESS.
12817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12818 * @param Port The I/O port.
12819 * @param pu32Value Where to store the fake value.
12820 * @param cbValue The size of the access.
12821 */
12822IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12823{
12824 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12825 if (pEvtRec)
12826 {
12827 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12828 pEvtRec->u.IOPortRead.Port = Port;
12829 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12830 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12831 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12832 }
12833 pVCpu->iem.s.cIOReads++;
12834 *pu32Value = 0xcccccccc;
12835 return VINF_SUCCESS;
12836}
12837
12838
12839/**
12840 * Fakes and records an I/O port write.
12841 *
12842 * @returns VINF_SUCCESS.
12843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12844 * @param Port The I/O port.
12845 * @param u32Value The value being written.
12846 * @param cbValue The size of the access.
12847 */
12848IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12849{
12850 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12851 if (pEvtRec)
12852 {
12853 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12854 pEvtRec->u.IOPortWrite.Port = Port;
12855 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12856 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12857 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12858 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12859 }
12860 pVCpu->iem.s.cIOWrites++;
12861 return VINF_SUCCESS;
12862}
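
/*
 * In verification mode the two helpers above stand in for the real I/O path:
 * the read hands back a recognisable dummy value and both merely record the
 * access for the later comparison against REM/HM.  A hypothetical call
 * sequence (not an actual call site):
 */
#if 0
    uint32_t u32Dummy;
    iemVerifyFakeIOPortRead(pVCpu, 0x60 /* keyboard data */, &u32Dummy, 1);
    Assert(u32Dummy == 0xcccccccc);     /* the value is faked; only port and size are recorded */
    iemVerifyFakeIOPortWrite(pVCpu, 0x80 /* POST code port */, 0xab, 1);
#endif
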
12863
12864
12865/**
12866 * Used by the assertion helpers below to add register state and disassembly details.
12867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12868 */
12869IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12870{
12871 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12872 PVM pVM = pVCpu->CTX_SUFF(pVM);
12873
12874 char szRegs[4096];
12875 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12876 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12877 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12878 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12879 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12880 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12881 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12882 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12883 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12884 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12885 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12886 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12887 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12888 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12889 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12890 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12891 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12892 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12893 " efer=%016VR{efer}\n"
12894 " pat=%016VR{pat}\n"
12895 " sf_mask=%016VR{sf_mask}\n"
12896 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12897 " lstar=%016VR{lstar}\n"
12898 " star=%016VR{star} cstar=%016VR{cstar}\n"
12899 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12900 );
12901
12902 char szInstr1[256];
12903 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12904 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12905 szInstr1, sizeof(szInstr1), NULL);
12906 char szInstr2[256];
12907 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12908 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12909 szInstr2, sizeof(szInstr2), NULL);
12910
12911 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12912}
12913
12914
12915/**
12916 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12917 * dump to the assertion info.
12918 *
12919 * @param pEvtRec The record to dump.
12920 */
12921IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12922{
12923 switch (pEvtRec->enmEvent)
12924 {
12925 case IEMVERIFYEVENT_IOPORT_READ:
12926 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12927                             pEvtRec->u.IOPortRead.Port,
12928                             pEvtRec->u.IOPortRead.cbValue);
12929 break;
12930 case IEMVERIFYEVENT_IOPORT_WRITE:
12931 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12932 pEvtRec->u.IOPortWrite.Port,
12933 pEvtRec->u.IOPortWrite.cbValue,
12934 pEvtRec->u.IOPortWrite.u32Value);
12935 break;
12936 case IEMVERIFYEVENT_IOPORT_STR_READ:
12937 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
12938                             pEvtRec->u.IOPortStrRead.Port,
12939                             pEvtRec->u.IOPortStrRead.cbValue,
12940                             pEvtRec->u.IOPortStrRead.cTransfers);
12941 break;
12942 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12943 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12944 pEvtRec->u.IOPortStrWrite.Port,
12945 pEvtRec->u.IOPortStrWrite.cbValue,
12946 pEvtRec->u.IOPortStrWrite.cTransfers);
12947 break;
12948 case IEMVERIFYEVENT_RAM_READ:
12949 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
12950 pEvtRec->u.RamRead.GCPhys,
12951 pEvtRec->u.RamRead.cb);
12952 break;
12953 case IEMVERIFYEVENT_RAM_WRITE:
12954 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
12955 pEvtRec->u.RamWrite.GCPhys,
12956 pEvtRec->u.RamWrite.cb,
12957 (int)pEvtRec->u.RamWrite.cb,
12958 pEvtRec->u.RamWrite.ab);
12959 break;
12960 default:
12961 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
12962 break;
12963 }
12964}
12965
12966
12967/**
12968 * Raises an assertion on the specified records, showing the given message with
12969 * record dumps attached.
12970 *
12971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12972 * @param pEvtRec1 The first record.
12973 * @param pEvtRec2 The second record.
12974 * @param pszMsg The message explaining why we're asserting.
12975 */
12976IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
12977{
12978 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12979 iemVerifyAssertAddRecordDump(pEvtRec1);
12980 iemVerifyAssertAddRecordDump(pEvtRec2);
12981 iemVerifyAssertMsg2(pVCpu);
12982 RTAssertPanic();
12983}
12984
12985
12986/**
12987 * Raises an assertion on the specified record, showing the given message with
12988 * a record dump attached.
12989 *
12990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12991 * @param pEvtRec The record to dump with the assertion.
12992 * @param pszMsg The message explaining why we're asserting.
12993 */
12994IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
12995{
12996 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12997 iemVerifyAssertAddRecordDump(pEvtRec);
12998 iemVerifyAssertMsg2(pVCpu);
12999 RTAssertPanic();
13000}
13001
13002
13003/**
13004 * Verifies a write record.
13005 *
13006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13007 * @param pEvtRec The write record.
13008 * @param fRem Set if REM did the other execution; if clear,
13009 * it was HM.
13010 */
13011IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13012{
13013 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13014 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13015 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13016 if ( RT_FAILURE(rc)
13017 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13018 {
13019 /* fend off ins */
13020 if ( !pVCpu->iem.s.cIOReads
13021 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13022 || ( pEvtRec->u.RamWrite.cb != 1
13023 && pEvtRec->u.RamWrite.cb != 2
13024 && pEvtRec->u.RamWrite.cb != 4) )
13025 {
13026 /* fend off ROMs and MMIO */
13027 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13028 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13029 {
13030 /* fend off fxsave */
13031 if (pEvtRec->u.RamWrite.cb != 512)
13032 {
13033 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13034 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13035                     RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
13036 RTAssertMsg2Add("%s: %.*Rhxs\n"
13037 "iem: %.*Rhxs\n",
13038 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13039 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13040 iemVerifyAssertAddRecordDump(pEvtRec);
13041 iemVerifyAssertMsg2(pVCpu);
13042 RTAssertPanic();
13043 }
13044 }
13045 }
13046 }
13047
13048}
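
/*
 * The ROM/MMIO fend-off above relies on the unsigned subtract-and-compare
 * idiom: (uAddr - uStart) > cbRange is a single test for "outside the range
 * starting at uStart", because an address below uStart wraps around to a huge
 * value.  Reduced to its essence (hypothetical helper name):
 */
#if 0
static bool exampleIsOutsideRange(uint64_t uAddr, uint64_t uStart, uint64_t cbRange)
{
    return uAddr - uStart > cbRange;
}
#endif
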
13049
13050/**
13051 * Performs the post-execution verification checks.
13052 */
13053IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13054{
13055 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13056 return rcStrictIem;
13057
13058 /*
13059 * Switch back the state.
13060 */
13061 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13062 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13063 Assert(pOrgCtx != pDebugCtx);
13064 IEM_GET_CTX(pVCpu) = pOrgCtx;
13065
13066 /*
13067 * Execute the instruction in REM.
13068 */
13069 bool fRem = false;
13070 PVM pVM = pVCpu->CTX_SUFF(pVM);
13071
13072 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13073#ifdef IEM_VERIFICATION_MODE_FULL_HM
13074 if ( HMIsEnabled(pVM)
13075 && pVCpu->iem.s.cIOReads == 0
13076 && pVCpu->iem.s.cIOWrites == 0
13077 && !pVCpu->iem.s.fProblematicMemory)
13078 {
13079 uint64_t uStartRip = pOrgCtx->rip;
13080 unsigned iLoops = 0;
13081 do
13082 {
13083 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13084 iLoops++;
13085 } while ( rc == VINF_SUCCESS
13086 || ( rc == VINF_EM_DBG_STEPPED
13087 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13088 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13089 || ( pOrgCtx->rip != pDebugCtx->rip
13090 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13091 && iLoops < 8) );
13092 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13093 rc = VINF_SUCCESS;
13094 }
13095#endif
13096 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13097 || rc == VINF_IOM_R3_IOPORT_READ
13098 || rc == VINF_IOM_R3_IOPORT_WRITE
13099 || rc == VINF_IOM_R3_MMIO_READ
13100 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13101 || rc == VINF_IOM_R3_MMIO_WRITE
13102 || rc == VINF_CPUM_R3_MSR_READ
13103 || rc == VINF_CPUM_R3_MSR_WRITE
13104 || rc == VINF_EM_RESCHEDULE
13105 )
13106 {
13107 EMRemLock(pVM);
13108 rc = REMR3EmulateInstruction(pVM, pVCpu);
13109 AssertRC(rc);
13110 EMRemUnlock(pVM);
13111 fRem = true;
13112 }
13113
13114# if 1 /* Skip unimplemented instructions for now. */
13115 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13116 {
13117 IEM_GET_CTX(pVCpu) = pOrgCtx;
13118 if (rc == VINF_EM_DBG_STEPPED)
13119 return VINF_SUCCESS;
13120 return rc;
13121 }
13122# endif
13123
13124 /*
13125 * Compare the register states.
13126 */
13127 unsigned cDiffs = 0;
13128 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13129 {
13130 //Log(("REM and IEM ends up with different registers!\n"));
13131 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13132
13133# define CHECK_FIELD(a_Field) \
13134 do \
13135 { \
13136 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13137 { \
13138 switch (sizeof(pOrgCtx->a_Field)) \
13139 { \
13140 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13141 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13142 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13143 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13144 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13145 } \
13146 cDiffs++; \
13147 } \
13148 } while (0)
13149# define CHECK_XSTATE_FIELD(a_Field) \
13150 do \
13151 { \
13152 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13153 { \
13154 switch (sizeof(pOrgXState->a_Field)) \
13155 { \
13156 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13157 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13158 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13159 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13160 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13161 } \
13162 cDiffs++; \
13163 } \
13164 } while (0)
13165
13166# define CHECK_BIT_FIELD(a_Field) \
13167 do \
13168 { \
13169 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13170 { \
13171 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13172 cDiffs++; \
13173 } \
13174 } while (0)
13175
13176# define CHECK_SEL(a_Sel) \
13177 do \
13178 { \
13179 CHECK_FIELD(a_Sel.Sel); \
13180 CHECK_FIELD(a_Sel.Attr.u); \
13181 CHECK_FIELD(a_Sel.u64Base); \
13182 CHECK_FIELD(a_Sel.u32Limit); \
13183 CHECK_FIELD(a_Sel.fFlags); \
13184 } while (0)
13185
13186 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13187 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13188
13189#if 1 /* The recompiler doesn't update these the Intel way. */
13190 if (fRem)
13191 {
13192 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13193 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13194 pOrgXState->x87.CS = pDebugXState->x87.CS;
13195 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13196 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13197 pOrgXState->x87.DS = pDebugXState->x87.DS;
13198 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13199 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13200 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13201 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13202 }
13203#endif
13204 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13205 {
13206 RTAssertMsg2Weak(" the FPU state differs\n");
13207 cDiffs++;
13208 CHECK_XSTATE_FIELD(x87.FCW);
13209 CHECK_XSTATE_FIELD(x87.FSW);
13210 CHECK_XSTATE_FIELD(x87.FTW);
13211 CHECK_XSTATE_FIELD(x87.FOP);
13212 CHECK_XSTATE_FIELD(x87.FPUIP);
13213 CHECK_XSTATE_FIELD(x87.CS);
13214 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13215 CHECK_XSTATE_FIELD(x87.FPUDP);
13216 CHECK_XSTATE_FIELD(x87.DS);
13217 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13218 CHECK_XSTATE_FIELD(x87.MXCSR);
13219 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13220 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13221 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13222 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13223 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13224 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13225 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13226 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13227 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13228 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13229 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13230 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13231 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13232 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13233 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13234 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13235 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13236 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13237 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13238 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13239 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13240 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13241 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13242 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13243 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13244 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13245 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13246 }
13247 CHECK_FIELD(rip);
13248 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13249 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13250 {
13251 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13252 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13253 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13254 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13255 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13256 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13257 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13258 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13259 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13260 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13261 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13262 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13263 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13264 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13265 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13266 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13267             if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13268 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13269 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13270 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13271 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13272 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13273 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13274 }
13275
13276 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13277 CHECK_FIELD(rax);
13278 CHECK_FIELD(rcx);
13279 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13280 CHECK_FIELD(rdx);
13281 CHECK_FIELD(rbx);
13282 CHECK_FIELD(rsp);
13283 CHECK_FIELD(rbp);
13284 CHECK_FIELD(rsi);
13285 CHECK_FIELD(rdi);
13286 CHECK_FIELD(r8);
13287 CHECK_FIELD(r9);
13288 CHECK_FIELD(r10);
13289 CHECK_FIELD(r11);
13290 CHECK_FIELD(r12);
13291 CHECK_FIELD(r13);
13292 CHECK_SEL(cs);
13293 CHECK_SEL(ss);
13294 CHECK_SEL(ds);
13295 CHECK_SEL(es);
13296 CHECK_SEL(fs);
13297 CHECK_SEL(gs);
13298 CHECK_FIELD(cr0);
13299
13300         /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13301            the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13302         /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access,
13303            while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13304 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13305 {
13306 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13307 { /* ignore */ }
13308 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13309 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13310 && fRem)
13311 { /* ignore */ }
13312 else
13313 CHECK_FIELD(cr2);
13314 }
13315 CHECK_FIELD(cr3);
13316 CHECK_FIELD(cr4);
13317 CHECK_FIELD(dr[0]);
13318 CHECK_FIELD(dr[1]);
13319 CHECK_FIELD(dr[2]);
13320 CHECK_FIELD(dr[3]);
13321 CHECK_FIELD(dr[6]);
13322 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13323 CHECK_FIELD(dr[7]);
13324 CHECK_FIELD(gdtr.cbGdt);
13325 CHECK_FIELD(gdtr.pGdt);
13326 CHECK_FIELD(idtr.cbIdt);
13327 CHECK_FIELD(idtr.pIdt);
13328 CHECK_SEL(ldtr);
13329 CHECK_SEL(tr);
13330 CHECK_FIELD(SysEnter.cs);
13331 CHECK_FIELD(SysEnter.eip);
13332 CHECK_FIELD(SysEnter.esp);
13333 CHECK_FIELD(msrEFER);
13334 CHECK_FIELD(msrSTAR);
13335 CHECK_FIELD(msrPAT);
13336 CHECK_FIELD(msrLSTAR);
13337 CHECK_FIELD(msrCSTAR);
13338 CHECK_FIELD(msrSFMASK);
13339 CHECK_FIELD(msrKERNELGSBASE);
13340
13341 if (cDiffs != 0)
13342 {
13343 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13344 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13345 RTAssertPanic();
13346 static bool volatile s_fEnterDebugger = true;
13347 if (s_fEnterDebugger)
13348 DBGFSTOP(pVM);
13349
13350# if 1 /* Ignore unimplemented instructions for now. */
13351 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13352 rcStrictIem = VINF_SUCCESS;
13353# endif
13354 }
13355# undef CHECK_FIELD
13356# undef CHECK_BIT_FIELD
13357 }
13358
13359 /*
13360 * If the register state compared fine, check the verification event
13361 * records.
13362 */
13363 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13364 {
13365 /*
13366         * Compare verification event records.
13367 * - I/O port accesses should be a 1:1 match.
13368 */
13369 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13370 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13371 while (pIemRec && pOtherRec)
13372 {
13373             /* Since we might miss RAM writes and reads, skip IEM RAM records that have no
13374                counterpart, but still verify any extra write records against guest memory. */
13375 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13376 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13377 && pIemRec->pNext)
13378 {
13379 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13380 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13381 pIemRec = pIemRec->pNext;
13382 }
13383
13384 /* Do the compare. */
13385 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13386 {
13387 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13388 break;
13389 }
13390 bool fEquals;
13391 switch (pIemRec->enmEvent)
13392 {
13393 case IEMVERIFYEVENT_IOPORT_READ:
13394 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13395 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13396 break;
13397 case IEMVERIFYEVENT_IOPORT_WRITE:
13398 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13399 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13400 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13401 break;
13402 case IEMVERIFYEVENT_IOPORT_STR_READ:
13403 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13404 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13405 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13406 break;
13407 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13408 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13409 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13410 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13411 break;
13412 case IEMVERIFYEVENT_RAM_READ:
13413 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13414 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13415 break;
13416 case IEMVERIFYEVENT_RAM_WRITE:
13417 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13418 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13419 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13420 break;
13421 default:
13422 fEquals = false;
13423 break;
13424 }
13425 if (!fEquals)
13426 {
13427 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13428 break;
13429 }
13430
13431 /* advance */
13432 pIemRec = pIemRec->pNext;
13433 pOtherRec = pOtherRec->pNext;
13434 }
13435
13436 /* Ignore extra writes and reads. */
13437 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13438 {
13439 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13440 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13441 pIemRec = pIemRec->pNext;
13442 }
13443 if (pIemRec != NULL)
13444 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13445 else if (pOtherRec != NULL)
13446 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13447 }
13448 IEM_GET_CTX(pVCpu) = pOrgCtx;
13449
13450 return rcStrictIem;
13451}
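
/*
 * The register comparison above follows a cheap-filter-then-detail pattern:
 * one memcmp over the whole context, and only on a mismatch a field-by-field
 * walk that reports and counts every difference.  The shape of it, with a
 * hypothetical two-field context (assumes memcmp from iprt/string.h):
 */
#if 0
typedef struct EXAMPLECTX { uint64_t rip; uint64_t rsp; } EXAMPLECTX;

static unsigned exampleCountDiffs(EXAMPLECTX const *pRef, EXAMPLECTX const *pChk)
{
    unsigned cDiffs = 0;
    if (memcmp(pRef, pChk, sizeof(*pRef)))              /* fast path: states are identical */
    {
        if (pRef->rip != pChk->rip) { RTAssertMsg2Weak(" rip differs\n"); cDiffs++; }
        if (pRef->rsp != pChk->rsp) { RTAssertMsg2Weak(" rsp differs\n"); cDiffs++; }
    }
    return cDiffs;
}
#endif
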
13452
13453#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13454
13455/* stubs */
13456IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13457{
13458 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13459 return VERR_INTERNAL_ERROR;
13460}
13461
13462IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13463{
13464 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13465 return VERR_INTERNAL_ERROR;
13466}
13467
13468#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13469
13470
13471#ifdef LOG_ENABLED
13472/**
13473 * Logs the current instruction.
13474 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13475 * @param pCtx The current CPU context.
13476 * @param fSameCtx Set if we have the same context information as the VMM,
13477 * clear if we may have already executed an instruction in
13478 * our debug context. When clear, we assume IEMCPU holds
13479 * valid CPU mode info.
13480 */
13481IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13482{
13483# ifdef IN_RING3
13484 if (LogIs2Enabled())
13485 {
13486 char szInstr[256];
13487 uint32_t cbInstr = 0;
13488 if (fSameCtx)
13489 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13490 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13491 szInstr, sizeof(szInstr), &cbInstr);
13492 else
13493 {
13494 uint32_t fFlags = 0;
13495 switch (pVCpu->iem.s.enmCpuMode)
13496 {
13497 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13498 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13499 case IEMMODE_16BIT:
13500 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13501 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13502 else
13503 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13504 break;
13505 }
13506 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13507 szInstr, sizeof(szInstr), &cbInstr);
13508 }
13509
13510 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13511 Log2(("****\n"
13512 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13513 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13514 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13515 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13516 " %s\n"
13517 ,
13518 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13519 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13520 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13521 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13522 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13523 szInstr));
13524
13525 if (LogIs3Enabled())
13526 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13527 }
13528 else
13529# endif
13530 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13531 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13532}
13533#endif
13534
13535
13536/**
13537 * Makes status code adjustments (pass up from I/O and access handlers)
13538 * as well as maintaining statistics.
13539 *
13540 * @returns Strict VBox status code to pass up.
13541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13542 * @param rcStrict The status from executing an instruction.
13543 */
13544DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13545{
13546 if (rcStrict != VINF_SUCCESS)
13547 {
13548 if (RT_SUCCESS(rcStrict))
13549 {
13550 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13551 || rcStrict == VINF_IOM_R3_IOPORT_READ
13552 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13553 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13554 || rcStrict == VINF_IOM_R3_MMIO_READ
13555 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13556 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13557 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13558 || rcStrict == VINF_CPUM_R3_MSR_READ
13559 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13560 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13561 || rcStrict == VINF_EM_RAW_TO_R3
13562 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13563 /* raw-mode / virt handlers only: */
13564 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13565 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13566 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13567 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13568 || rcStrict == VINF_SELM_SYNC_GDT
13569 || rcStrict == VINF_CSAM_PENDING_ACTION
13570 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13571 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13572/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13573 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13574 if (rcPassUp == VINF_SUCCESS)
13575 pVCpu->iem.s.cRetInfStatuses++;
13576 else if ( rcPassUp < VINF_EM_FIRST
13577 || rcPassUp > VINF_EM_LAST
13578 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13579 {
13580 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13581 pVCpu->iem.s.cRetPassUpStatus++;
13582 rcStrict = rcPassUp;
13583 }
13584 else
13585 {
13586 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13587 pVCpu->iem.s.cRetInfStatuses++;
13588 }
13589 }
13590 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13591 pVCpu->iem.s.cRetAspectNotImplemented++;
13592 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13593 pVCpu->iem.s.cRetInstrNotImplemented++;
13594#ifdef IEM_VERIFICATION_MODE_FULL
13595 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13596 rcStrict = VINF_SUCCESS;
13597#endif
13598 else
13599 pVCpu->iem.s.cRetErrStatuses++;
13600 }
13601 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13602 {
13603 pVCpu->iem.s.cRetPassUpStatus++;
13604 rcStrict = pVCpu->iem.s.rcPassUp;
13605 }
13606
13607 return rcStrict;
13608}
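
/*
 * The pass-up handling above boils down to a priority rule: a pending ring-3
 * status replaces the instruction status unless the latter is an EM status of
 * equal or higher priority (lower VINF_EM_* values rank higher).  A condensed
 * sketch of just that decision (hypothetical helper, plain int statuses):
 */
#if 0
static int examplePickStatus(int rcInstr, int rcPassUp)
{
    if (rcPassUp == VINF_SUCCESS)
        return rcInstr;                 /* nothing pending */
    if (   rcPassUp < VINF_EM_FIRST
        || rcPassUp > VINF_EM_LAST
        || rcPassUp < rcInstr)
        return rcPassUp;                /* the pending status wins */
    return rcInstr;                     /* the instruction status outranks it */
}
#endif
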
13609
13610
13611/**
13612 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13613 * IEMExecOneWithPrefetchedByPC.
13614 *
13615 * Similar code is found in IEMExecLots.
13616 *
13617 * @return Strict VBox status code.
13618 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13620 * @param fExecuteInhibit If set, execute the instruction following CLI,
13621 * POP SS and MOV SS,GR.
13622 */
13623#ifdef __GNUC__
13624DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13625#else
13626DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13627#endif
13628{
13629#ifdef IEM_WITH_SETJMP
13630 VBOXSTRICTRC rcStrict;
13631 jmp_buf JmpBuf;
13632 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13633 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13634 if ((rcStrict = setjmp(JmpBuf)) == 0)
13635 {
13636 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13637 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13638 }
13639 else
13640 pVCpu->iem.s.cLongJumps++;
13641 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13642#else
13643 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13644 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13645#endif
13646 if (rcStrict == VINF_SUCCESS)
13647 pVCpu->iem.s.cInstructions++;
13648 if (pVCpu->iem.s.cActiveMappings > 0)
13649 {
13650 Assert(rcStrict != VINF_SUCCESS);
13651 iemMemRollback(pVCpu);
13652 }
13653//#ifdef DEBUG
13654// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13655//#endif
13656
13657 /* Execute the next instruction as well if a cli, pop ss or
13658 mov ss, Gr has just completed successfully. */
13659 if ( fExecuteInhibit
13660 && rcStrict == VINF_SUCCESS
13661 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13662 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13663 {
13664 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13665 if (rcStrict == VINF_SUCCESS)
13666 {
13667#ifdef LOG_ENABLED
13668 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13669#endif
13670#ifdef IEM_WITH_SETJMP
13671 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13672 if ((rcStrict = setjmp(JmpBuf)) == 0)
13673 {
13674 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13675 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13676 }
13677 else
13678 pVCpu->iem.s.cLongJumps++;
13679 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13680#else
13681 IEM_OPCODE_GET_NEXT_U8(&b);
13682 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13683#endif
13684 if (rcStrict == VINF_SUCCESS)
13685 pVCpu->iem.s.cInstructions++;
13686 if (pVCpu->iem.s.cActiveMappings > 0)
13687 {
13688 Assert(rcStrict != VINF_SUCCESS);
13689 iemMemRollback(pVCpu);
13690 }
13691 }
13692 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13693 }
13694
13695 /*
13696 * Return value fiddling, statistics and sanity assertions.
13697 */
13698 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13699
13700 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13701 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13702#if defined(IEM_VERIFICATION_MODE_FULL)
13703 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13704 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13705 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13706 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13707#endif
13708 return rcStrict;
13709}
13710
13711
13712#ifdef IN_RC
13713/**
13714 * Re-enters raw-mode or ensures we return to ring-3.
13715 *
13716 * @returns rcStrict, maybe modified.
13717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13718 * @param pCtx The current CPU context.
13719 * @param   rcStrict    The status code returned by the interpreter.
13720 */
13721DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13722{
13723 if ( !pVCpu->iem.s.fInPatchCode
13724 && ( rcStrict == VINF_SUCCESS
13725 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13726 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13727 {
13728 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13729 CPUMRawEnter(pVCpu);
13730 else
13731 {
13732 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13733 rcStrict = VINF_EM_RESCHEDULE;
13734 }
13735 }
13736 return rcStrict;
13737}
13738#endif
13739
13740
13741/**
13742 * Execute one instruction.
13743 *
13744 * @return Strict VBox status code.
13745 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13746 */
13747VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13748{
13749#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13750 if (++pVCpu->iem.s.cVerifyDepth == 1)
13751 iemExecVerificationModeSetup(pVCpu);
13752#endif
13753#ifdef LOG_ENABLED
13754 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13755 iemLogCurInstr(pVCpu, pCtx, true);
13756#endif
13757
13758 /*
13759 * Do the decoding and emulation.
13760 */
13761 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13762 if (rcStrict == VINF_SUCCESS)
13763 rcStrict = iemExecOneInner(pVCpu, true);
13764
13765#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13766 /*
13767 * Assert some sanity.
13768 */
13769 if (pVCpu->iem.s.cVerifyDepth == 1)
13770 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13771 pVCpu->iem.s.cVerifyDepth--;
13772#endif
13773#ifdef IN_RC
13774 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13775#endif
13776 if (rcStrict != VINF_SUCCESS)
13777 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13778 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13779 return rcStrict;
13780}
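/* Illustrative sketch (not part of the original source): how an EM-style caller
 * might invoke IEMExecOne and react to the strict status code. The helper name
 * below is an assumption for illustration only. */
#if 0 /* example only */
static VBOXSTRICTRC emR3ExampleInterpretOneInstruction(PVMCPU pVCpu)
{
    /* Interpret exactly one guest instruction at the current CS:RIP. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    /* VINF_SUCCESS means the instruction was fully emulated; any other code is
       either an informational scheduling request (VINF_EM_*) or an error the
       caller must handle. */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("example: IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif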
13781
13782
13783VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13784{
13785 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13786 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13787
13788 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13789 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13790 if (rcStrict == VINF_SUCCESS)
13791 {
13792 rcStrict = iemExecOneInner(pVCpu, true);
13793 if (pcbWritten)
13794 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13795 }
13796
13797#ifdef IN_RC
13798 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13799#endif
13800 return rcStrict;
13801}
13802
13803
13804VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13805 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13806{
13807 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13808 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13809
13810 VBOXSTRICTRC rcStrict;
13811 if ( cbOpcodeBytes
13812 && pCtx->rip == OpcodeBytesPC)
13813 {
13814 iemInitDecoder(pVCpu, false);
13815#ifdef IEM_WITH_CODE_TLB
13816 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13817 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13818 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13819 pVCpu->iem.s.offCurInstrStart = 0;
13820 pVCpu->iem.s.offInstrNextByte = 0;
13821#else
13822 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13823 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13824#endif
13825 rcStrict = VINF_SUCCESS;
13826 }
13827 else
13828 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13829 if (rcStrict == VINF_SUCCESS)
13830 {
13831 rcStrict = iemExecOneInner(pVCpu, true);
13832 }
13833
13834#ifdef IN_RC
13835 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13836#endif
13837 return rcStrict;
13838}
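/* Illustrative sketch (not part of the original source): feeding opcode bytes the
 * caller already has (e.g. from an exit record) to IEMExecOneWithPrefetchedByPC.
 * The helper name and the origin of the byte buffer are assumptions; as the code
 * above shows, a mismatch between pCtx->rip and OpcodeBytesPC simply falls back
 * to a normal opcode prefetch. */
#if 0 /* example only */
static VBOXSTRICTRC exampleExecWithKnownBytes(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t GCPtrInstr,
                                              uint8_t const *pabInstr, size_t cbInstr)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, GCPtrInstr, pabInstr, cbInstr);
}
#endif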
13839
13840
13841VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13842{
13843 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13844 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13845
13846 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13847 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13848 if (rcStrict == VINF_SUCCESS)
13849 {
13850 rcStrict = iemExecOneInner(pVCpu, false);
13851 if (pcbWritten)
13852 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13853 }
13854
13855#ifdef IN_RC
13856 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13857#endif
13858 return rcStrict;
13859}
13860
13861
13862VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13863 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13864{
13865 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13866 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13867
13868 VBOXSTRICTRC rcStrict;
13869 if ( cbOpcodeBytes
13870 && pCtx->rip == OpcodeBytesPC)
13871 {
13872 iemInitDecoder(pVCpu, true);
13873#ifdef IEM_WITH_CODE_TLB
13874 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13875 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13876 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13877 pVCpu->iem.s.offCurInstrStart = 0;
13878 pVCpu->iem.s.offInstrNextByte = 0;
13879#else
13880 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13881 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13882#endif
13883 rcStrict = VINF_SUCCESS;
13884 }
13885 else
13886 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13887 if (rcStrict == VINF_SUCCESS)
13888 rcStrict = iemExecOneInner(pVCpu, false);
13889
13890#ifdef IN_RC
13891 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13892#endif
13893 return rcStrict;
13894}
13895
13896
13897/**
13898 * For debugging DISGetParamSize; may come in handy.
13899 *
13900 * @returns Strict VBox status code.
13901 * @param pVCpu The cross context virtual CPU structure of the
13902 * calling EMT.
13903 * @param pCtxCore The context core structure.
13904 * @param OpcodeBytesPC The PC of the opcode bytes.
13905 * @param   pvOpcodeBytes   Prefetched opcode bytes.
13906 * @param cbOpcodeBytes Number of prefetched bytes.
13907 * @param pcbWritten Where to return the number of bytes written.
13908 * Optional.
13909 */
13910VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13911 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13912 uint32_t *pcbWritten)
13913{
13914 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13915 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13916
13917 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13918 VBOXSTRICTRC rcStrict;
13919 if ( cbOpcodeBytes
13920 && pCtx->rip == OpcodeBytesPC)
13921 {
13922 iemInitDecoder(pVCpu, true);
13923#ifdef IEM_WITH_CODE_TLB
13924 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13925 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13926 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13927 pVCpu->iem.s.offCurInstrStart = 0;
13928 pVCpu->iem.s.offInstrNextByte = 0;
13929#else
13930 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13931 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13932#endif
13933 rcStrict = VINF_SUCCESS;
13934 }
13935 else
13936 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13937 if (rcStrict == VINF_SUCCESS)
13938 {
13939 rcStrict = iemExecOneInner(pVCpu, false);
13940 if (pcbWritten)
13941 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13942 }
13943
13944#ifdef IN_RC
13945 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13946#endif
13947 return rcStrict;
13948}
13949
13950
13951VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
13952{
13953 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
13954
13955#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13956 /*
13957 * See if there is an interrupt pending in TRPM, inject it if we can.
13958 */
13959 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13960# ifdef IEM_VERIFICATION_MODE_FULL
13961 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13962# endif
13963 if ( pCtx->eflags.Bits.u1IF
13964 && TRPMHasTrap(pVCpu)
13965 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13966 {
13967 uint8_t u8TrapNo;
13968 TRPMEVENT enmType;
13969 RTGCUINT uErrCode;
13970 RTGCPTR uCr2;
13971 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13972 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13973 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13974 TRPMResetTrap(pVCpu);
13975 }
13976
13977 /*
13978 * Log the state.
13979 */
13980# ifdef LOG_ENABLED
13981 iemLogCurInstr(pVCpu, pCtx, true);
13982# endif
13983
13984 /*
13985 * Do the decoding and emulation.
13986 */
13987 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13988 if (rcStrict == VINF_SUCCESS)
13989 rcStrict = iemExecOneInner(pVCpu, true);
13990
13991 /*
13992 * Assert some sanity.
13993 */
13994 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13995
13996 /*
13997 * Log and return.
13998 */
13999 if (rcStrict != VINF_SUCCESS)
14000 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14001 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14002 if (pcInstructions)
14003 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14004 return rcStrict;
14005
14006#else /* Not verification mode */
14007
14008 /*
14009 * See if there is an interrupt pending in TRPM, inject it if we can.
14010 */
14011 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14012# ifdef IEM_VERIFICATION_MODE_FULL
14013 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14014# endif
14015 if ( pCtx->eflags.Bits.u1IF
14016 && TRPMHasTrap(pVCpu)
14017 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14018 {
14019 uint8_t u8TrapNo;
14020 TRPMEVENT enmType;
14021 RTGCUINT uErrCode;
14022 RTGCPTR uCr2;
14023 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14024 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14025 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14026 TRPMResetTrap(pVCpu);
14027 }
14028
14029 /*
14030 * Initial decoder init w/ prefetch, then setup setjmp.
14031 */
14032 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14033 if (rcStrict == VINF_SUCCESS)
14034 {
14035# ifdef IEM_WITH_SETJMP
14036 jmp_buf JmpBuf;
14037 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14038 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14039 pVCpu->iem.s.cActiveMappings = 0;
14040 if ((rcStrict = setjmp(JmpBuf)) == 0)
14041# endif
14042 {
14043 /*
14044 * The run loop. We limit ourselves to 4096 instructions right now.
14045 */
14046 PVM pVM = pVCpu->CTX_SUFF(pVM);
14047 uint32_t cInstr = 4096;
14048 for (;;)
14049 {
14050 /*
14051 * Log the state.
14052 */
14053# ifdef LOG_ENABLED
14054 iemLogCurInstr(pVCpu, pCtx, true);
14055# endif
14056
14057 /*
14058 * Do the decoding and emulation.
14059 */
14060 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14061 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14062 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14063 {
14064 Assert(pVCpu->iem.s.cActiveMappings == 0);
14065 pVCpu->iem.s.cInstructions++;
14066 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14067 {
14068 uint32_t fCpu = pVCpu->fLocalForcedActions
14069 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14070 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14071 | VMCPU_FF_TLB_FLUSH
14072# ifdef VBOX_WITH_RAW_MODE
14073 | VMCPU_FF_TRPM_SYNC_IDT
14074 | VMCPU_FF_SELM_SYNC_TSS
14075 | VMCPU_FF_SELM_SYNC_GDT
14076 | VMCPU_FF_SELM_SYNC_LDT
14077# endif
14078 | VMCPU_FF_INHIBIT_INTERRUPTS
14079 | VMCPU_FF_BLOCK_NMIS ));
14080
14081 if (RT_LIKELY( ( !fCpu
14082 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14083 && !pCtx->rflags.Bits.u1IF) )
14084 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14085 {
14086 if (cInstr-- > 0)
14087 {
14088 Assert(pVCpu->iem.s.cActiveMappings == 0);
14089 iemReInitDecoder(pVCpu);
14090 continue;
14091 }
14092 }
14093 }
14094 Assert(pVCpu->iem.s.cActiveMappings == 0);
14095 }
14096 else if (pVCpu->iem.s.cActiveMappings > 0)
14097 iemMemRollback(pVCpu);
14098 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14099 break;
14100 }
14101 }
14102# ifdef IEM_WITH_SETJMP
14103 else
14104 {
14105 if (pVCpu->iem.s.cActiveMappings > 0)
14106 iemMemRollback(pVCpu);
14107 pVCpu->iem.s.cLongJumps++;
14108 }
14109 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14110# endif
14111
14112 /*
14113 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14114 */
14115 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14116 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14117# if defined(IEM_VERIFICATION_MODE_FULL)
14118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14122# endif
14123 }
14124
14125 /*
14126 * Maybe re-enter raw-mode and log.
14127 */
14128# ifdef IN_RC
14129 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14130# endif
14131 if (rcStrict != VINF_SUCCESS)
14132 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14133 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14134 if (pcInstructions)
14135 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14136 return rcStrict;
14137#endif /* Not verification mode */
14138}
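/* Illustrative sketch (not part of the original source): batching work through
 * IEMExecLots and using the optional instruction counter, e.g. for statistics.
 * The helper name is an assumption for illustration only. */
#if 0 /* example only */
static VBOXSTRICTRC exampleRunBatch(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    Log(("example: executed %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif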
14139
14140
14141
14142/**
14143 * Injects a trap, fault, abort, software interrupt or external interrupt.
14144 *
14145 * The parameter list matches TRPMQueryTrapAll pretty closely.
14146 *
14147 * @returns Strict VBox status code.
14148 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14149 * @param u8TrapNo The trap number.
14150 * @param enmType What type is it (trap/fault/abort), software
14151 * interrupt or hardware interrupt.
14152 * @param uErrCode The error code if applicable.
14153 * @param uCr2 The CR2 value if applicable.
14154 * @param cbInstr The instruction length (only relevant for
14155 * software interrupts).
14156 */
14157VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14158 uint8_t cbInstr)
14159{
14160 iemInitDecoder(pVCpu, false);
14161#ifdef DBGFTRACE_ENABLED
14162 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14163 u8TrapNo, enmType, uErrCode, uCr2);
14164#endif
14165
14166 uint32_t fFlags;
14167 switch (enmType)
14168 {
14169 case TRPM_HARDWARE_INT:
14170 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14171 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14172 uErrCode = uCr2 = 0;
14173 break;
14174
14175 case TRPM_SOFTWARE_INT:
14176 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14177 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14178 uErrCode = uCr2 = 0;
14179 break;
14180
14181 case TRPM_TRAP:
14182 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14183 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14184 if (u8TrapNo == X86_XCPT_PF)
14185 fFlags |= IEM_XCPT_FLAGS_CR2;
14186 switch (u8TrapNo)
14187 {
14188 case X86_XCPT_DF:
14189 case X86_XCPT_TS:
14190 case X86_XCPT_NP:
14191 case X86_XCPT_SS:
14192 case X86_XCPT_PF:
14193 case X86_XCPT_AC:
14194 fFlags |= IEM_XCPT_FLAGS_ERR;
14195 break;
14196
14197 case X86_XCPT_NMI:
14198 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14199 break;
14200 }
14201 break;
14202
14203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14204 }
14205
14206 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14207}
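/* Illustrative sketch (not part of the original source): injecting an external
 * hardware interrupt (vector 0x20 is an arbitrary example) via IEMInjectTrap.
 * For TRPM_HARDWARE_INT the error code and CR2 arguments are ignored as shown
 * above, and cbInstr only matters for software interrupts. */
#if 0 /* example only */
static VBOXSTRICTRC exampleInjectExtInt(PVMCPU pVCpu)
{
    return IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo: example vector*/, TRPM_HARDWARE_INT,
                         0 /*uErrCode: ignored*/, 0 /*uCr2: ignored*/, 0 /*cbInstr*/);
}
#endif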
14208
14209
14210/**
14211 * Injects the active TRPM event.
14212 *
14213 * @returns Strict VBox status code.
14214 * @param pVCpu The cross context virtual CPU structure.
14215 */
14216VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14217{
14218#ifndef IEM_IMPLEMENTS_TASKSWITCH
14219 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14220#else
14221 uint8_t u8TrapNo;
14222 TRPMEVENT enmType;
14223 RTGCUINT uErrCode;
14224 RTGCUINTPTR uCr2;
14225 uint8_t cbInstr;
14226 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14227 if (RT_FAILURE(rc))
14228 return rc;
14229
14230 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14231
14232 /** @todo Are there any other codes that imply the event was successfully
14233 * delivered to the guest? See @bugref{6607}. */
14234 if ( rcStrict == VINF_SUCCESS
14235 || rcStrict == VINF_IEM_RAISED_XCPT)
14236 {
14237 TRPMResetTrap(pVCpu);
14238 }
14239 return rcStrict;
14240#endif
14241}
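/* Illustrative fragment (not part of the original source): a caller with a pending
 * TRPM event can simply hand it over to IEM for delivery. */
#if 0 /* example only */
    if (TRPMHasTrap(pVCpu))
        rcStrict = IEMInjectTrpmEvent(pVCpu);
#endif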
14242
14243
14244VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14245{
14246 return VERR_NOT_IMPLEMENTED;
14247}
14248
14249
14250VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14251{
14252 return VERR_NOT_IMPLEMENTED;
14253}
14254
14255
14256#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14257/**
14258 * Executes a IRET instruction with default operand size.
14259 *
14260 * This is for PATM.
14261 *
14262 * @returns VBox status code.
14263 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14264 * @param pCtxCore The register frame.
14265 */
14266VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14267{
14268 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14269
14270 iemCtxCoreToCtx(pCtx, pCtxCore);
14271 iemInitDecoder(pVCpu);
14272 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14273 if (rcStrict == VINF_SUCCESS)
14274 iemCtxToCtxCore(pCtxCore, pCtx);
14275 else
14276 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14277 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14278 return rcStrict;
14279}
14280#endif
14281
14282
14283/**
14284 * Macro used by the IEMExec* methods to check the given instruction length.
14285 *
14286 * Will return on failure!
14287 *
14288 * @param a_cbInstr The given instruction length.
14289 * @param a_cbMin The minimum length.
14290 */
14291#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14292 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14293 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
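/* Illustration (not part of the original source): the single unsigned comparison in
 * the macro folds the two checks a_cbMin <= a_cbInstr and a_cbInstr <= 15 (the
 * architectural maximum instruction length) into one, because the subtraction wraps
 * to a huge unsigned value when a_cbInstr < a_cbMin. With a_cbMin = 2, for instance:
 *   a_cbInstr = 1  ->  0xffffffff > 13  (rejected)
 *   a_cbInstr = 2  ->  0  <= 13         (accepted)
 *   a_cbInstr = 15 ->  13 <= 13         (accepted)
 *   a_cbInstr = 16 ->  14 >  13         (rejected)
 */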
14294
14295
14296/**
14297 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14298 *
14299 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14300 *
14301 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
14302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14303 * @param rcStrict The status code to fiddle.
14304 */
14305DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14306{
14307 iemUninitExec(pVCpu);
14308#ifdef IN_RC
14309 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14310 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14311#else
14312 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14313#endif
14314}
14315
14316
14317/**
14318 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14319 *
14320 * This API ASSUMES that the caller has already verified that the guest code is
14321 * allowed to access the I/O port. (The I/O port is in the DX register in the
14322 * guest state.)
14323 *
14324 * @returns Strict VBox status code.
14325 * @param pVCpu The cross context virtual CPU structure.
14326 * @param cbValue The size of the I/O port access (1, 2, or 4).
14327 * @param enmAddrMode The addressing mode.
14328 * @param fRepPrefix Indicates whether a repeat prefix is used
14329 * (doesn't matter which for this instruction).
14330 * @param cbInstr The instruction length in bytes.
14331 * @param iEffSeg The effective segment address.
14332 * @param fIoChecked Whether the access to the I/O port has been
14333 * checked or not. It's typically checked in the
14334 * HM scenario.
14335 */
14336VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14337 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14338{
14339 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14340 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14341
14342 /*
14343 * State init.
14344 */
14345 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14346
14347 /*
14348 * Switch orgy for getting to the right handler.
14349 */
14350 VBOXSTRICTRC rcStrict;
14351 if (fRepPrefix)
14352 {
14353 switch (enmAddrMode)
14354 {
14355 case IEMMODE_16BIT:
14356 switch (cbValue)
14357 {
14358 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14359 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14360 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14361 default:
14362 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14363 }
14364 break;
14365
14366 case IEMMODE_32BIT:
14367 switch (cbValue)
14368 {
14369 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14370 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14371 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14372 default:
14373 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14374 }
14375 break;
14376
14377 case IEMMODE_64BIT:
14378 switch (cbValue)
14379 {
14380 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14381 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14382 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14383 default:
14384 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14385 }
14386 break;
14387
14388 default:
14389 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14390 }
14391 }
14392 else
14393 {
14394 switch (enmAddrMode)
14395 {
14396 case IEMMODE_16BIT:
14397 switch (cbValue)
14398 {
14399 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14400 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14401 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14402 default:
14403 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14404 }
14405 break;
14406
14407 case IEMMODE_32BIT:
14408 switch (cbValue)
14409 {
14410 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14411 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14412 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14413 default:
14414 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14415 }
14416 break;
14417
14418 case IEMMODE_64BIT:
14419 switch (cbValue)
14420 {
14421 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14422 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14423 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14424 default:
14425 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14426 }
14427 break;
14428
14429 default:
14430 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14431 }
14432 }
14433
14434 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14435}
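/* Illustrative sketch (not part of the original source): how an HM exit handler
 * might hand a "rep outsb" (DS:rSI -> port DX) over to IEM. The instruction length,
 * address mode, segment and fIoChecked values here are assumptions for illustration;
 * a real caller derives them from its VM-exit information. */
#if 0 /* example only */
static VBOXSTRICTRC exampleHandleRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu,
                                1              /*cbValue: byte sized*/,
                                IEMMODE_32BIT  /*enmAddrMode: assumed*/,
                                true           /*fRepPrefix*/,
                                2              /*cbInstr: assumed*/,
                                X86_SREG_DS    /*iEffSeg: assumed*/,
                                false          /*fIoChecked*/);
}
#endif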
14436
14437
14438/**
14439 * Interface for HM and EM for executing string I/O IN (read) instructions.
14440 *
14441 * This API ASSUMES that the caller has already verified that the guest code is
14442 * allowed to access the I/O port. (The I/O port is in the DX register in the
14443 * guest state.)
14444 *
14445 * @returns Strict VBox status code.
14446 * @param pVCpu The cross context virtual CPU structure.
14447 * @param cbValue The size of the I/O port access (1, 2, or 4).
14448 * @param enmAddrMode The addressing mode.
14449 * @param fRepPrefix Indicates whether a repeat prefix is used
14450 * (doesn't matter which for this instruction).
14451 * @param cbInstr The instruction length in bytes.
14452 * @param fIoChecked Whether the access to the I/O port has been
14453 * checked or not. It's typically checked in the
14454 * HM scenario.
14455 */
14456VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14457 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14458{
14459 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14460
14461 /*
14462 * State init.
14463 */
14464 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14465
14466 /*
14467 * Switch orgy for getting to the right handler.
14468 */
14469 VBOXSTRICTRC rcStrict;
14470 if (fRepPrefix)
14471 {
14472 switch (enmAddrMode)
14473 {
14474 case IEMMODE_16BIT:
14475 switch (cbValue)
14476 {
14477 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14478 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14479 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14480 default:
14481 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14482 }
14483 break;
14484
14485 case IEMMODE_32BIT:
14486 switch (cbValue)
14487 {
14488 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14489 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14490 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14491 default:
14492 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14493 }
14494 break;
14495
14496 case IEMMODE_64BIT:
14497 switch (cbValue)
14498 {
14499 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14500 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14501 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14502 default:
14503 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14504 }
14505 break;
14506
14507 default:
14508 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14509 }
14510 }
14511 else
14512 {
14513 switch (enmAddrMode)
14514 {
14515 case IEMMODE_16BIT:
14516 switch (cbValue)
14517 {
14518 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14519 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14520 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14521 default:
14522 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14523 }
14524 break;
14525
14526 case IEMMODE_32BIT:
14527 switch (cbValue)
14528 {
14529 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14530 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14531 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14532 default:
14533 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14534 }
14535 break;
14536
14537 case IEMMODE_64BIT:
14538 switch (cbValue)
14539 {
14540 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14541 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14542 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14543 default:
14544 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14545 }
14546 break;
14547
14548 default:
14549 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14550 }
14551 }
14552
14553 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14554}
14555
14556
14557/**
14558 * Interface for rawmode to execute an OUT instruction.
14559 *
14560 * @returns Strict VBox status code.
14561 * @param pVCpu The cross context virtual CPU structure.
14562 * @param cbInstr The instruction length in bytes.
14563 * @param   u16Port     The port to write to.
14564 * @param cbReg The register size.
14565 *
14566 * @remarks In ring-0 not all of the state needs to be synced in.
14567 */
14568VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14569{
14570 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14571 Assert(cbReg <= 4 && cbReg != 3);
14572
14573 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14574 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14575 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14576}
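/* Illustrative fragment (not part of the original source): emulating an already
 * decoded one byte "out dx, al". The port number is an assumption for illustration;
 * a real caller passes the guest DX value it decoded the intercept with. */
#if 0 /* example only */
    rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, 0x70 /*u16Port: assumed*/, 1 /*cbReg: byte*/);
#endif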
14577
14578
14579/**
14580 * Interface for rawmode to execute an IN instruction.
14581 *
14582 * @returns Strict VBox status code.
14583 * @param pVCpu The cross context virtual CPU structure.
14584 * @param cbInstr The instruction length in bytes.
14585 * @param u16Port The port to read.
14586 * @param cbReg The register size.
14587 */
14588VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14589{
14590 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14591 Assert(cbReg <= 4 && cbReg != 3);
14592
14593 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14594 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14595 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14596}
14597
14598
14599/**
14600 * Interface for HM and EM to write to a CRx register.
14601 *
14602 * @returns Strict VBox status code.
14603 * @param pVCpu The cross context virtual CPU structure.
14604 * @param cbInstr The instruction length in bytes.
14605 * @param iCrReg The control register number (destination).
14606 * @param iGReg The general purpose register number (source).
14607 *
14608 * @remarks In ring-0 not all of the state needs to be synced in.
14609 */
14610VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14611{
14612 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14613 Assert(iCrReg < 16);
14614 Assert(iGReg < 16);
14615
14616 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14617 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14618 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14619}
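/* Illustrative fragment (not part of the original source): forwarding a
 * "mov cr3, rax" intercept to IEM. The 3 byte length and the register indexes are
 * assumptions for illustration; a real caller takes them from its exit information. */
#if 0 /* example only */
    rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr: assumed*/, 3 /*iCrReg: CR3*/, X86_GREG_xAX /*iGReg*/);
#endif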
14620
14621
14622/**
14623 * Interface for HM and EM to read from a CRx register.
14624 *
14625 * @returns Strict VBox status code.
14626 * @param pVCpu The cross context virtual CPU structure.
14627 * @param cbInstr The instruction length in bytes.
14628 * @param iGReg The general purpose register number (destination).
14629 * @param iCrReg The control register number (source).
14630 *
14631 * @remarks In ring-0 not all of the state needs to be synced in.
14632 */
14633VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14634{
14635 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14636 Assert(iCrReg < 16);
14637 Assert(iGReg < 16);
14638
14639 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14640 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14641 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14642}
14643
14644
14645/**
14646 * Interface for HM and EM to clear the CR0[TS] bit.
14647 *
14648 * @returns Strict VBox status code.
14649 * @param pVCpu The cross context virtual CPU structure.
14650 * @param cbInstr The instruction length in bytes.
14651 *
14652 * @remarks In ring-0 not all of the state needs to be synced in.
14653 */
14654VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14655{
14656 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14657
14658 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14659 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14660 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14661}
14662
14663
14664/**
14665 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14666 *
14667 * @returns Strict VBox status code.
14668 * @param pVCpu The cross context virtual CPU structure.
14669 * @param cbInstr The instruction length in bytes.
14670 * @param uValue The value to load into CR0.
14671 *
14672 * @remarks In ring-0 not all of the state needs to be synced in.
14673 */
14674VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14675{
14676 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14677
14678 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14679 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14680 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14681}
14682
14683
14684/**
14685 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14686 *
14687 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14688 *
14689 * @returns Strict VBox status code.
14690 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14691 * @param cbInstr The instruction length in bytes.
14692 * @remarks In ring-0 not all of the state needs to be synced in.
14693 * @thread EMT(pVCpu)
14694 */
14695VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14696{
14697 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14698
14699 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14700 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14701 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14702}
14703
14704#ifdef IN_RING3
14705
14706/**
14707 * Handles the unlikely and probably fatal merge cases.
14708 *
14709 * @returns Merged status code.
14710 * @param rcStrict Current EM status code.
14711 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14712 * with @a rcStrict.
14713 * @param iMemMap The memory mapping index. For error reporting only.
14714 * @param pVCpu The cross context virtual CPU structure of the calling
14715 * thread, for error reporting only.
14716 */
14717DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14718 unsigned iMemMap, PVMCPU pVCpu)
14719{
14720 if (RT_FAILURE_NP(rcStrict))
14721 return rcStrict;
14722
14723 if (RT_FAILURE_NP(rcStrictCommit))
14724 return rcStrictCommit;
14725
14726 if (rcStrict == rcStrictCommit)
14727 return rcStrictCommit;
14728
14729 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14730 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14731 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14732 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14733 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14734 return VERR_IOM_FF_STATUS_IPE;
14735}
14736
14737
14738/**
14739 * Helper for IOMR3ProcessForceFlag.
14740 *
14741 * @returns Merged status code.
14742 * @param rcStrict Current EM status code.
14743 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14744 * with @a rcStrict.
14745 * @param iMemMap The memory mapping index. For error reporting only.
14746 * @param pVCpu The cross context virtual CPU structure of the calling
14747 * thread, for error reporting only.
14748 */
14749DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14750{
14751 /* Simple. */
14752 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14753 return rcStrictCommit;
14754
14755 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14756 return rcStrict;
14757
14758 /* EM scheduling status codes. */
14759 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14760 && rcStrict <= VINF_EM_LAST))
14761 {
14762 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14763 && rcStrictCommit <= VINF_EM_LAST))
14764 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14765 }
14766
14767 /* Unlikely */
14768 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14769}
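/* Illustration (not part of the original source) of the merge rules above: if the
 * current status is VINF_SUCCESS or VINF_EM_RAW_TO_R3 (we are already in ring-3),
 * the commit status simply takes over; if the commit returned VINF_SUCCESS, the
 * current status is kept; and when both are EM scheduling statuses, the numerically
 * smaller (higher priority) one wins. Everything else is handed to
 * iemR3MergeStatusSlow. */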
14770
14771
14772/**
14773 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14774 *
14775 * @returns Merge between @a rcStrict and what the commit operation returned.
14776 * @param pVM The cross context VM structure.
14777 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14778 * @param rcStrict The status code returned by ring-0 or raw-mode.
14779 */
14780VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14781{
14782 /*
14783 * Reset the pending commit.
14784 */
14785 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14786 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14787 ("%#x %#x %#x\n",
14788 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14789 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14790
14791 /*
14792 * Commit the pending bounce buffers (usually just one).
14793 */
14794 unsigned cBufs = 0;
14795 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14796 while (iMemMap-- > 0)
14797 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14798 {
14799 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14800 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14801 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14802
14803 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14804 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14805 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14806
14807 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14808 {
14809 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14811 pbBuf,
14812 cbFirst,
14813 PGMACCESSORIGIN_IEM);
14814 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14815 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14816 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14817 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14818 }
14819
14820 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14821 {
14822 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14824 pbBuf + cbFirst,
14825 cbSecond,
14826 PGMACCESSORIGIN_IEM);
14827 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14828 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14829 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14830 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14831 }
14832 cBufs++;
14833 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14834 }
14835
14836 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14837 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14838 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14839 pVCpu->iem.s.cActiveMappings = 0;
14840 return rcStrict;
14841}
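/* Illustrative fragment (not part of the original source): the ring-3 force-flag
 * handling side. After coming back from ring-0 / raw-mode, the caller lets IEM
 * commit the pending bounce-buffer writes and merges the result into the status
 * it already has. */
#if 0 /* example only */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif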
14842
14843#endif /* IN_RING3 */
14844