VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@65595

Last change on this file was revision 65595, checked in by vboxsync, 8 years ago

Fixed an ancient typo.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 575.8 KB
1/* $Id: IEMAll.cpp 65595 2017-02-02 19:25:28Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
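 *
 * For illustration only, whether the \#MF path is active can be read straight from
 * CR0 (X86_CR0_NE is assumed to be the usual iprt/x86.h constant):
 * @code
 *      bool const fUseMF = RT_BOOL(pCtx->cr0 & X86_CR0_NE); // NE=1: #MF, NE=0: IRQ 13 via FERR#
 * @endcode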
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
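 * For illustration only, an opcode handler might emit the following (the handler
 * name, selector and RIP values are made up):
 * @code
 *      LogFlow(("iemOp_example: enter\n"));                    // Flow:    enter/exit state
 *      Log(("iemOp_example: raising #GP(0)\n"));               // Level 1: major events
 *      Log4(("decode - %04x:%08RX64 example\n", uSel, uRip));  // Level 4: mnemonics w/ EIP
 * @endcode
 *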
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#ifdef IEM_VERIFICATION_MODE_FULL
115# include <VBox/vmm/rem.h>
116# include <VBox/vmm/mm.h>
117#endif
118#include <VBox/vmm/vm.h>
119#include <VBox/log.h>
120#include <VBox/err.h>
121#include <VBox/param.h>
122#include <VBox/dis.h>
123#include <VBox/disopcode.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters, as well as
140 * tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters, as well as
153 * tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215
216/*********************************************************************************************************************************
217* Defined Constants And Macros *
218*********************************************************************************************************************************/
219/** @def IEM_WITH_SETJMP
220 * Enables alternative status code handling using setjmps.
221 *
222 * This adds a bit of expense via the setjmp() call since it saves all the
223 * non-volatile registers. However, it eliminates return code checks and allows
224 * for more optimal return value passing (return regs instead of stack buffer).
225 */
226#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
227# define IEM_WITH_SETJMP
228#endif
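/* Illustrative sketch, not part of the build: the status handling style that
   IEM_WITH_SETJMP does away with.  Without setjmp every memory fetch returns a strict
   status that must be checked and propagated; with IEM_WITH_SETJMP the *Jmp fetch
   variants longjmp straight out on failure, so these checks vanish from the fast path.
   The function name below is made up. */
#if 0
static VBOXSTRICTRC iemExampleFetchWithoutSetjmp(PVMCPU pVCpu, uint32_t *pu32Dst, RTGCPTR GCPtrMem)
{
    VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, pu32Dst, X86_SREG_DS, GCPtrMem);
    if (rcStrict != VINF_SUCCESS)   /* an explicit check after every single access */
        return rcStrict;
    return VINF_SUCCESS;
}
#endif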
229
230/** Temporary hack to disable the double execution. Will be removed in favor
231 * of a dedicated execution mode in EM. */
232//#define IEM_VERIFICATION_MODE_NO_REM
233
234/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
235 * due to GCC lacking knowledge about the value range of a switch. */
236#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
237
238/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
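/* Illustrative usage sketch, not part of the build: the default-case macros above let a
   fully enumerated switch satisfy GCC without inventing a bogus fall-through path.  The
   helper name is made up. */
#if 0
static uint8_t iemExampleOpSizeToBytes(IEMMODE enmOpSize)
{
    switch (enmOpSize)
    {
        case IEMMODE_16BIT: return 2;
        case IEMMODE_32BIT: return 4;
        case IEMMODE_64BIT: return 8;
        IEM_NOT_REACHED_DEFAULT_CASE_RET2(UINT8_MAX);
    }
}
#endif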
240
241/**
242 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
243 * occasion.
244 */
245#ifdef LOG_ENABLED
246# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
247 do { \
248 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
249 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
250 } while (0)
251#else
252# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
254#endif
255
256/**
257 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion using the supplied logger statement.
259 *
260 * @param a_LoggerArgs What to log on failure.
261 */
262#ifdef LOG_ENABLED
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 do { \
265 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
266 /*LogFunc(a_LoggerArgs);*/ \
267 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
268 } while (0)
269#else
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
272#endif
273
274/**
275 * Call an opcode decoder function.
276 *
277 * We're using macros for this so that adding and removing parameters can be
278 * done as we please. See FNIEMOP_DEF.
279 */
280#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
281
282/**
283 * Call a common opcode decoder function taking one extra argument.
284 *
285 * We're using macros for this so that adding and removing parameters can be
286 * done as we please. See FNIEMOP_DEF_1.
287 */
288#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
289
290/**
291 * Call a common opcode decoder function taking two extra arguments.
292 *
293 * We're using macros for this so that adding and removing parameters can be
294 * done as we please. See FNIEMOP_DEF_2.
295 */
296#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
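/* Illustrative sketch, not part of the build: how FNIEMOP_DEF and FNIEMOP_CALL pair up.
   Both opcode functions below are made up; the macros supply the calling convention and
   the implicit pVCpu parameter. */
#if 0
FNIEMOP_DEF(iemOpExample_worker)
{
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOpExample_dispatcher)
{
    return FNIEMOP_CALL(iemOpExample_worker);   /* expands to iemOpExample_worker(pVCpu) */
}
#endif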
297
298/**
299 * Check if we're currently executing in real or virtual 8086 mode.
300 *
301 * @returns @c true if it is, @c false if not.
302 * @param a_pVCpu The IEM state of the current CPU.
303 */
304#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
305
306/**
307 * Check if we're currently executing in virtual 8086 mode.
308 *
309 * @returns @c true if it is, @c false if not.
310 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
311 */
312#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
313
314/**
315 * Check if we're currently executing in long mode.
316 *
317 * @returns @c true if it is, @c false if not.
318 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
319 */
320#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
321
322/**
323 * Check if we're currently executing in real mode.
324 *
325 * @returns @c true if it is, @c false if not.
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
336
337/**
338 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
339 * @returns PCCPUMFEATURES
340 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
341 */
342#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
343
344/**
345 * Evaluates to true if we're presenting an Intel CPU to the guest.
346 */
347#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
348
349/**
350 * Evaluates to true if we're presenting an AMD CPU to the guest.
351 */
352#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
353
354/**
355 * Check if the address is canonical.
356 */
357#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
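/* Illustrative sketch, not part of the build: typical use of the mode and feature query
   macros above in an opcode handler.  The handler name and the long mode/SSE2
   requirements are made up for illustration. */
#if 0
FNIEMOP_DEF(iemOpExample_requiresLongModeAndSse2)
{
    if (!IEM_IS_LONG_MODE(pVCpu))                       /* guest CPU mode check */
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2)      /* guest CPU feature check */
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
    return VINF_SUCCESS;
}
#endif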
358
359/** @def IEM_USE_UNALIGNED_DATA_ACCESS
360 * Use unaligned accesses instead of elaborate byte assembly. */
361#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
362# define IEM_USE_UNALIGNED_DATA_ACCESS
363#endif
364
365
366/*********************************************************************************************************************************
367* Global Variables *
368*********************************************************************************************************************************/
369extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
370
371
372/** Function table for the ADD instruction. */
373IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
374{
375 iemAImpl_add_u8, iemAImpl_add_u8_locked,
376 iemAImpl_add_u16, iemAImpl_add_u16_locked,
377 iemAImpl_add_u32, iemAImpl_add_u32_locked,
378 iemAImpl_add_u64, iemAImpl_add_u64_locked
379};
380
381/** Function table for the ADC instruction. */
382IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
383{
384 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
385 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
386 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
387 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
388};
389
390/** Function table for the SUB instruction. */
391IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
392{
393 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
394 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
395 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
396 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
397};
398
399/** Function table for the SBB instruction. */
400IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
401{
402 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
403 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
404 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
405 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
406};
407
408/** Function table for the OR instruction. */
409IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
410{
411 iemAImpl_or_u8, iemAImpl_or_u8_locked,
412 iemAImpl_or_u16, iemAImpl_or_u16_locked,
413 iemAImpl_or_u32, iemAImpl_or_u32_locked,
414 iemAImpl_or_u64, iemAImpl_or_u64_locked
415};
416
417/** Function table for the XOR instruction. */
418IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
419{
420 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
421 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
422 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
423 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
424};
425
426/** Function table for the AND instruction. */
427IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
428{
429 iemAImpl_and_u8, iemAImpl_and_u8_locked,
430 iemAImpl_and_u16, iemAImpl_and_u16_locked,
431 iemAImpl_and_u32, iemAImpl_and_u32_locked,
432 iemAImpl_and_u64, iemAImpl_and_u64_locked
433};
434
435/** Function table for the CMP instruction.
436 * @remarks Making operand order ASSUMPTIONS.
437 */
438IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
439{
440 iemAImpl_cmp_u8, NULL,
441 iemAImpl_cmp_u16, NULL,
442 iemAImpl_cmp_u32, NULL,
443 iemAImpl_cmp_u64, NULL
444};
445
446/** Function table for the TEST instruction.
447 * @remarks Making operand order ASSUMPTIONS.
448 */
449IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
450{
451 iemAImpl_test_u8, NULL,
452 iemAImpl_test_u16, NULL,
453 iemAImpl_test_u32, NULL,
454 iemAImpl_test_u64, NULL
455};
456
457/** Function table for the BT instruction. */
458IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
459{
460 NULL, NULL,
461 iemAImpl_bt_u16, NULL,
462 iemAImpl_bt_u32, NULL,
463 iemAImpl_bt_u64, NULL
464};
465
466/** Function table for the BTC instruction. */
467IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
468{
469 NULL, NULL,
470 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
471 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
472 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
473};
474
475/** Function table for the BTR instruction. */
476IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
477{
478 NULL, NULL,
479 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
480 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
481 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
482};
483
484/** Function table for the BTS instruction. */
485IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
486{
487 NULL, NULL,
488 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
489 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
490 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
491};
492
493/** Function table for the BSF instruction. */
494IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
495{
496 NULL, NULL,
497 iemAImpl_bsf_u16, NULL,
498 iemAImpl_bsf_u32, NULL,
499 iemAImpl_bsf_u64, NULL
500};
501
502/** Function table for the BSR instruction. */
503IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
504{
505 NULL, NULL,
506 iemAImpl_bsr_u16, NULL,
507 iemAImpl_bsr_u32, NULL,
508 iemAImpl_bsr_u64, NULL
509};
510
511/** Function table for the IMUL instruction. */
512IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
513{
514 NULL, NULL,
515 iemAImpl_imul_two_u16, NULL,
516 iemAImpl_imul_two_u32, NULL,
517 iemAImpl_imul_two_u64, NULL
518};
519
520/** Group 1 /r lookup table. */
521IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
522{
523 &g_iemAImpl_add,
524 &g_iemAImpl_or,
525 &g_iemAImpl_adc,
526 &g_iemAImpl_sbb,
527 &g_iemAImpl_and,
528 &g_iemAImpl_sub,
529 &g_iemAImpl_xor,
530 &g_iemAImpl_cmp
531};
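/* Illustrative sketch, not part of the build: how a group-1 opcode (0x80..0x83) picks its
   worker table from the /reg field of the ModR/M byte.  The helper name is made up and
   the X86_MODRM_* constants are assumed to be the usual iprt/x86.h ones. */
#if 0
static PCIEMOPBINSIZES iemExamplePickGrp1Worker(uint8_t bRm)
{
    uint8_t const iReg = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
    return g_apIemImplGrp1[iReg];   /* 0=ADD 1=OR 2=ADC 3=SBB 4=AND 5=SUB 6=XOR 7=CMP */
}
#endif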
532
533/** Function table for the INC instruction. */
534IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
535{
536 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
537 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
538 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
539 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
540};
541
542/** Function table for the DEC instruction. */
543IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
544{
545 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
546 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
547 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
548 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
549};
550
551/** Function table for the NEG instruction. */
552IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
553{
554 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
555 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
556 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
557 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
558};
559
560/** Function table for the NOT instruction. */
561IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
562{
563 iemAImpl_not_u8, iemAImpl_not_u8_locked,
564 iemAImpl_not_u16, iemAImpl_not_u16_locked,
565 iemAImpl_not_u32, iemAImpl_not_u32_locked,
566 iemAImpl_not_u64, iemAImpl_not_u64_locked
567};
568
569
570/** Function table for the ROL instruction. */
571IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
572{
573 iemAImpl_rol_u8,
574 iemAImpl_rol_u16,
575 iemAImpl_rol_u32,
576 iemAImpl_rol_u64
577};
578
579/** Function table for the ROR instruction. */
580IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
581{
582 iemAImpl_ror_u8,
583 iemAImpl_ror_u16,
584 iemAImpl_ror_u32,
585 iemAImpl_ror_u64
586};
587
588/** Function table for the RCL instruction. */
589IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
590{
591 iemAImpl_rcl_u8,
592 iemAImpl_rcl_u16,
593 iemAImpl_rcl_u32,
594 iemAImpl_rcl_u64
595};
596
597/** Function table for the RCR instruction. */
598IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
599{
600 iemAImpl_rcr_u8,
601 iemAImpl_rcr_u16,
602 iemAImpl_rcr_u32,
603 iemAImpl_rcr_u64
604};
605
606/** Function table for the SHL instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
608{
609 iemAImpl_shl_u8,
610 iemAImpl_shl_u16,
611 iemAImpl_shl_u32,
612 iemAImpl_shl_u64
613};
614
615/** Function table for the SHR instruction. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
617{
618 iemAImpl_shr_u8,
619 iemAImpl_shr_u16,
620 iemAImpl_shr_u32,
621 iemAImpl_shr_u64
622};
623
624/** Function table for the SAR instruction. */
625IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
626{
627 iemAImpl_sar_u8,
628 iemAImpl_sar_u16,
629 iemAImpl_sar_u32,
630 iemAImpl_sar_u64
631};
632
633
634/** Function table for the MUL instruction. */
635IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
636{
637 iemAImpl_mul_u8,
638 iemAImpl_mul_u16,
639 iemAImpl_mul_u32,
640 iemAImpl_mul_u64
641};
642
643/** Function table for the IMUL instruction working implicitly on rAX. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
645{
646 iemAImpl_imul_u8,
647 iemAImpl_imul_u16,
648 iemAImpl_imul_u32,
649 iemAImpl_imul_u64
650};
651
652/** Function table for the DIV instruction. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
654{
655 iemAImpl_div_u8,
656 iemAImpl_div_u16,
657 iemAImpl_div_u32,
658 iemAImpl_div_u64
659};
660
661/** Function table for the IDIV instruction. */
662IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
663{
664 iemAImpl_idiv_u8,
665 iemAImpl_idiv_u16,
666 iemAImpl_idiv_u32,
667 iemAImpl_idiv_u64
668};
669
670/** Function table for the SHLD instruction */
671IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
672{
673 iemAImpl_shld_u16,
674 iemAImpl_shld_u32,
675 iemAImpl_shld_u64,
676};
677
678/** Function table for the SHRD instruction */
679IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
680{
681 iemAImpl_shrd_u16,
682 iemAImpl_shrd_u32,
683 iemAImpl_shrd_u64,
684};
685
686
687/** Function table for the PUNPCKLBW instruction */
688IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
689/** Function table for the PUNPCKLWD instruction */
690IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
691/** Function table for the PUNPCKLDQ instruction */
692IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
693/** Function table for the PUNPCKLQDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
695
696/** Function table for the PUNPCKHBW instruction */
697IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
698/** Function table for the PUNPCKHWD instruction */
699IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
700/** Function table for the PUNPCKHDQ instruction */
701IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
702/** Function table for the PUNPCKHQDQ instruction */
703IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
704
705/** Function table for the PXOR instruction */
706IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
707/** Function table for the PCMPEQB instruction */
708IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
709/** Function table for the PCMPEQW instruction */
710IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
711/** Function table for the PCMPEQD instruction */
712IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
713
714
715#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
716/** What IEM just wrote. */
717uint8_t g_abIemWrote[256];
718/** How much IEM just wrote. */
719size_t g_cbIemWrote;
720#endif
721
722
723/*********************************************************************************************************************************
724* Internal Functions *
725*********************************************************************************************************************************/
726IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
727IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
728IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
729IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
730/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
733IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
734IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
735IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
736IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
737IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
738IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
740IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
742IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
743#ifdef IEM_WITH_SETJMP
744DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
745DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
746DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
748DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
749#endif
750
751IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
752IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
754IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
755IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
756IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
757IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
758IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
759IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
760IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
761IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
762IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
763IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
764IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
765IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
766IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
767
768#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
769IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
770#endif
771IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
772IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
773
774
775
776/**
777 * Sets the pass up status.
778 *
779 * @returns VINF_SUCCESS.
780 * @param pVCpu The cross context virtual CPU structure of the
781 * calling thread.
782 * @param rcPassUp The pass up status. Must be informational.
783 * VINF_SUCCESS is not allowed.
784 */
785IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
786{
787 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
788
789 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
790 if (rcOldPassUp == VINF_SUCCESS)
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 /* If both are EM scheduling codes, use EM priority rules. */
793 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
794 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
795 {
796 if (rcPassUp < rcOldPassUp)
797 {
798 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
799 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
800 }
801 else
802 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
803 }
804 /* Override EM scheduling with specific status code. */
805 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
806 {
807 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
808 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
809 }
810 /* Don't override specific status code, first come first served. */
811 else
812 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
813 return VINF_SUCCESS;
814}
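/* Illustrative sketch, not part of the build: the intended calling pattern.  An
   informational status from a physical access is stashed via iemSetPassUpStatus (which
   itself returns VINF_SUCCESS) so execution can continue, and the stashed code is merged
   back in when IEM returns to its caller.  The helper name is made up. */
#if 0
static VBOXSTRICTRC iemExampleHandleReadStatus(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (rcStrict == VINF_SUCCESS)
        return VINF_SUCCESS;
    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))       /* informational: remember it and continue */
        return iemSetPassUpStatus(pVCpu, rcStrict);
    return rcStrict;                            /* real error: propagate immediately */
}
#endif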
815
816
817/**
818 * Calculates the CPU mode.
819 *
820 * This is mainly for updating IEMCPU::enmCpuMode.
821 *
822 * @returns CPU mode.
823 * @param pCtx The register context for the CPU.
824 */
825DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
826{
827 if (CPUMIsGuestIn64BitCodeEx(pCtx))
828 return IEMMODE_64BIT;
829 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
830 return IEMMODE_32BIT;
831 return IEMMODE_16BIT;
832}
833
834
835/**
836 * Initializes the execution state.
837 *
838 * @param pVCpu The cross context virtual CPU structure of the
839 * calling thread.
840 * @param fBypassHandlers Whether to bypass access handlers.
841 *
842 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
843 * side-effects in strict builds.
844 */
845DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
846{
847 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
848
849 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
850
851#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
854 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
855 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
856 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
858 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
859 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
860#endif
861
862#ifdef VBOX_WITH_RAW_MODE_NOT_R0
863 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
864#endif
865 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
866 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
867#ifdef VBOX_STRICT
868 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
869 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
870 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
871 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
872 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
873 pVCpu->iem.s.uRexReg = 127;
874 pVCpu->iem.s.uRexB = 127;
875 pVCpu->iem.s.uRexIndex = 127;
876 pVCpu->iem.s.iEffSeg = 127;
877 pVCpu->iem.s.idxPrefix = 127;
878 pVCpu->iem.s.uVex3rdReg = 127;
879 pVCpu->iem.s.uVexLength = 127;
880 pVCpu->iem.s.fEvexStuff = 127;
881 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
882# ifdef IEM_WITH_CODE_TLB
883 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
884 pVCpu->iem.s.pbInstrBuf = NULL;
885 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
886 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
887 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
888 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
889# else
890 pVCpu->iem.s.offOpcode = 127;
891 pVCpu->iem.s.cbOpcode = 127;
892# endif
893#endif
894
895 pVCpu->iem.s.cActiveMappings = 0;
896 pVCpu->iem.s.iNextMapping = 0;
897 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
898 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
899#ifdef VBOX_WITH_RAW_MODE_NOT_R0
900 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
901 && pCtx->cs.u64Base == 0
902 && pCtx->cs.u32Limit == UINT32_MAX
903 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
904 if (!pVCpu->iem.s.fInPatchCode)
905 CPUMRawLeave(pVCpu, VINF_SUCCESS);
906#endif
907
908#ifdef IEM_VERIFICATION_MODE_FULL
909 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
910 pVCpu->iem.s.fNoRem = true;
911#endif
912}
913
914
915/**
916 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
917 *
918 * @param pVCpu The cross context virtual CPU structure of the
919 * calling thread.
920 */
921DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
922{
923 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
924#ifdef IEM_VERIFICATION_MODE_FULL
925 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
926#endif
927#ifdef VBOX_STRICT
928# ifdef IEM_WITH_CODE_TLB
929 NOREF(pVCpu);
930# else
931 pVCpu->iem.s.cbOpcode = 0;
932# endif
933#else
934 NOREF(pVCpu);
935#endif
936}
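/* Illustrative sketch, not part of the build: the init/uninit pairing expected of callers,
   per the remark on iemInitExec above.  The function name and the elided execution step
   are made up. */
#if 0
static VBOXSTRICTRC iemExampleExecOne(PVMCPU pVCpu)
{
    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;       /* ... decode and execute one instruction ... */
    iemUninitExec(pVCpu);                       /* undo the strict-build state poisoning */
    return rcStrict;
}
#endif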
937
938
939/**
940 * Initializes the decoder state.
941 *
942 * iemReInitDecoder is mostly a copy of this function.
943 *
944 * @param pVCpu The cross context virtual CPU structure of the
945 * calling thread.
946 * @param fBypassHandlers Whether to bypass access handlers.
947 */
948DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
949{
950 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
951
952 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
953
954#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
955 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
956 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
957 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
959 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
960 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
961 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
962 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
963#endif
964
965#ifdef VBOX_WITH_RAW_MODE_NOT_R0
966 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
967#endif
968 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
969#ifdef IEM_VERIFICATION_MODE_FULL
970 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
971 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
972#endif
973 IEMMODE enmMode = iemCalcCpuMode(pCtx);
974 pVCpu->iem.s.enmCpuMode = enmMode;
975 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
976 pVCpu->iem.s.enmEffAddrMode = enmMode;
977 if (enmMode != IEMMODE_64BIT)
978 {
979 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
980 pVCpu->iem.s.enmEffOpSize = enmMode;
981 }
982 else
983 {
984 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
985 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
986 }
987 pVCpu->iem.s.fPrefixes = 0;
988 pVCpu->iem.s.uRexReg = 0;
989 pVCpu->iem.s.uRexB = 0;
990 pVCpu->iem.s.uRexIndex = 0;
991 pVCpu->iem.s.idxPrefix = 0;
992 pVCpu->iem.s.uVex3rdReg = 0;
993 pVCpu->iem.s.uVexLength = 0;
994 pVCpu->iem.s.fEvexStuff = 0;
995 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
996#ifdef IEM_WITH_CODE_TLB
997 pVCpu->iem.s.pbInstrBuf = NULL;
998 pVCpu->iem.s.offInstrNextByte = 0;
999 pVCpu->iem.s.offCurInstrStart = 0;
1000# ifdef VBOX_STRICT
1001 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1002 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1003 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1004# endif
1005#else
1006 pVCpu->iem.s.offOpcode = 0;
1007 pVCpu->iem.s.cbOpcode = 0;
1008#endif
1009 pVCpu->iem.s.cActiveMappings = 0;
1010 pVCpu->iem.s.iNextMapping = 0;
1011 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1012 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1013#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1014 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1015 && pCtx->cs.u64Base == 0
1016 && pCtx->cs.u32Limit == UINT32_MAX
1017 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1018 if (!pVCpu->iem.s.fInPatchCode)
1019 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1020#endif
1021
1022#ifdef DBGFTRACE_ENABLED
1023 switch (enmMode)
1024 {
1025 case IEMMODE_64BIT:
1026 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1027 break;
1028 case IEMMODE_32BIT:
1029 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1030 break;
1031 case IEMMODE_16BIT:
1032 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1033 break;
1034 }
1035#endif
1036}
1037
1038
1039/**
1040 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1041 *
1042 * This is mostly a copy of iemInitDecoder.
1043 *
1044 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1045 */
1046DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1047{
1048 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1049
1050 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1051
1052#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1053 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1054 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1055 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1056 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1057 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1058 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1059 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1060 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1061#endif
1062
1063 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1064#ifdef IEM_VERIFICATION_MODE_FULL
1065 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1066 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1067#endif
1068 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1069 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1070 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1071 pVCpu->iem.s.enmEffAddrMode = enmMode;
1072 if (enmMode != IEMMODE_64BIT)
1073 {
1074 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1075 pVCpu->iem.s.enmEffOpSize = enmMode;
1076 }
1077 else
1078 {
1079 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1080 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1081 }
1082 pVCpu->iem.s.fPrefixes = 0;
1083 pVCpu->iem.s.uRexReg = 0;
1084 pVCpu->iem.s.uRexB = 0;
1085 pVCpu->iem.s.uRexIndex = 0;
1086 pVCpu->iem.s.idxPrefix = 0;
1087 pVCpu->iem.s.uVex3rdReg = 0;
1088 pVCpu->iem.s.uVexLength = 0;
1089 pVCpu->iem.s.fEvexStuff = 0;
1090 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1091#ifdef IEM_WITH_CODE_TLB
1092 if (pVCpu->iem.s.pbInstrBuf)
1093 {
1094 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1095 - pVCpu->iem.s.uInstrBufPc;
1096 if (off < pVCpu->iem.s.cbInstrBufTotal)
1097 {
1098 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1099 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1100 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1101 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1102 else
1103 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1104 }
1105 else
1106 {
1107 pVCpu->iem.s.pbInstrBuf = NULL;
1108 pVCpu->iem.s.offInstrNextByte = 0;
1109 pVCpu->iem.s.offCurInstrStart = 0;
1110 pVCpu->iem.s.cbInstrBuf = 0;
1111 pVCpu->iem.s.cbInstrBufTotal = 0;
1112 }
1113 }
1114 else
1115 {
1116 pVCpu->iem.s.offInstrNextByte = 0;
1117 pVCpu->iem.s.offCurInstrStart = 0;
1118 pVCpu->iem.s.cbInstrBuf = 0;
1119 pVCpu->iem.s.cbInstrBufTotal = 0;
1120 }
1121#else
1122 pVCpu->iem.s.cbOpcode = 0;
1123 pVCpu->iem.s.offOpcode = 0;
1124#endif
1125 Assert(pVCpu->iem.s.cActiveMappings == 0);
1126 pVCpu->iem.s.iNextMapping = 0;
1127 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1128 Assert(pVCpu->iem.s.fBypassHandlers == false);
1129#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1130 if (!pVCpu->iem.s.fInPatchCode)
1131 { /* likely */ }
1132 else
1133 {
1134 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1135 && pCtx->cs.u64Base == 0
1136 && pCtx->cs.u32Limit == UINT32_MAX
1137 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1138 if (!pVCpu->iem.s.fInPatchCode)
1139 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1140 }
1141#endif
1142
1143#ifdef DBGFTRACE_ENABLED
1144 switch (enmMode)
1145 {
1146 case IEMMODE_64BIT:
1147 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1148 break;
1149 case IEMMODE_32BIT:
1150 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1151 break;
1152 case IEMMODE_16BIT:
1153 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1154 break;
1155 }
1156#endif
1157}
1158
1159
1160
1161/**
1162 * Prefetch opcodes the first time when starting executing.
1163 *
1164 * @returns Strict VBox status code.
1165 * @param pVCpu The cross context virtual CPU structure of the
1166 * calling thread.
1167 * @param fBypassHandlers Whether to bypass access handlers.
1168 */
1169IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1170{
1171#ifdef IEM_VERIFICATION_MODE_FULL
1172 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1173#endif
1174 iemInitDecoder(pVCpu, fBypassHandlers);
1175
1176#ifdef IEM_WITH_CODE_TLB
1177 /** @todo Do ITLB lookup here. */
1178
1179#else /* !IEM_WITH_CODE_TLB */
1180
1181 /*
1182 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1183 *
1184 * First translate CS:rIP to a physical address.
1185 */
1186 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1187 uint32_t cbToTryRead;
1188 RTGCPTR GCPtrPC;
1189 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1190 {
1191 cbToTryRead = PAGE_SIZE;
1192 GCPtrPC = pCtx->rip;
1193 if (IEM_IS_CANONICAL(GCPtrPC))
1194 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1195 else
1196 return iemRaiseGeneralProtectionFault0(pVCpu);
1197 }
1198 else
1199 {
1200 uint32_t GCPtrPC32 = pCtx->eip;
1201 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1202 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1203 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1204 else
1205 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1206 if (cbToTryRead) { /* likely */ }
1207 else /* overflowed */
1208 {
1209 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1210 cbToTryRead = UINT32_MAX;
1211 }
1212 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1213 Assert(GCPtrPC <= UINT32_MAX);
1214 }
1215
1216# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1217 /* Allow interpretation of patch manager code blocks since they can for
1218 instance throw #PFs for perfectly good reasons. */
1219 if (pVCpu->iem.s.fInPatchCode)
1220 {
1221 size_t cbRead = 0;
1222 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1223 AssertRCReturn(rc, rc);
1224 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1225 return VINF_SUCCESS;
1226 }
1227# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1228
1229 RTGCPHYS GCPhys;
1230 uint64_t fFlags;
1231 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1232 if (RT_SUCCESS(rc)) { /* probable */ }
1233 else
1234 {
1235 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1236 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1237 }
1238 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1239 else
1240 {
1241 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1242 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1243 }
1244 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1245 else
1246 {
1247 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1248 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1249 }
1250 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1251 /** @todo Check reserved bits and such stuff. PGM is better at doing
1252 * that, so do it when implementing the guest virtual address
1253 * TLB... */
1254
1255# ifdef IEM_VERIFICATION_MODE_FULL
1256 /*
1257 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1258 * instruction.
1259 */
1260 /** @todo optimize this differently by not using PGMPhysRead. */
1261 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1262 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1263 if ( offPrevOpcodes < cbOldOpcodes
1264 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1265 {
1266 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1267 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1268 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1269 pVCpu->iem.s.cbOpcode = cbNew;
1270 return VINF_SUCCESS;
1271 }
1272# endif
1273
1274 /*
1275 * Read the bytes at this address.
1276 */
1277 PVM pVM = pVCpu->CTX_SUFF(pVM);
1278# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1279 size_t cbActual;
1280 if ( PATMIsEnabled(pVM)
1281 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1282 {
1283 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1284 Assert(cbActual > 0);
1285 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1286 }
1287 else
1288# endif
1289 {
1290 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1291 if (cbToTryRead > cbLeftOnPage)
1292 cbToTryRead = cbLeftOnPage;
1293 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1294 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1295
1296 if (!pVCpu->iem.s.fBypassHandlers)
1297 {
1298 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1299 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1300 { /* likely */ }
1301 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1302 {
1303 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1304 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1305 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1306 }
1307 else
1308 {
1309 Log((RT_SUCCESS(rcStrict)
1310 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1311 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1312 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1313 return rcStrict;
1314 }
1315 }
1316 else
1317 {
1318 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1319 if (RT_SUCCESS(rc))
1320 { /* likely */ }
1321 else
1322 {
1323 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1324 GCPtrPC, GCPhys, cbToTryRead, rc));
1325 return rc;
1326 }
1327 }
1328 pVCpu->iem.s.cbOpcode = cbToTryRead;
1329 }
1330#endif /* !IEM_WITH_CODE_TLB */
1331 return VINF_SUCCESS;
1332}
1333
1334
1335/**
1336 * Invalidates the IEM TLBs.
1337 *
1338 * This is called internally as well as by PGM when moving GC mappings.
1339 *
1340 *
1341 * @param pVCpu The cross context virtual CPU structure of the calling
1342 * thread.
1343 * @param fVmm Set when PGM calls us with a remapping.
1344 */
1345VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1346{
1347#ifdef IEM_WITH_CODE_TLB
1348 pVCpu->iem.s.cbInstrBufTotal = 0;
1349 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1350 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1351 { /* very likely */ }
1352 else
1353 {
1354 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1355 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1356 while (i-- > 0)
1357 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1358 }
1359#endif
1360
1361#ifdef IEM_WITH_DATA_TLB
1362 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1363 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1364 { /* very likely */ }
1365 else
1366 {
1367 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1368 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1369 while (i-- > 0)
1370 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1371 }
1372#endif
1373 NOREF(pVCpu); NOREF(fVmm);
1374}
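/* Illustrative sketch, not part of the build (and only meaningful with IEM_WITH_CODE_TLB):
   why bumping uTlbRevision invalidates every entry.  Entries are tagged with the revision
   current at fill time, so after the increment no stale entry can compare equal.  The
   helper name is made up; compare with the real lookup in iemOpcodeFetchBytesJmp. */
#if 0
static bool iemExampleCodeTlbLookupHits(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
    return pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag].uTag == uTag;
}
#endif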
1375
1376
1377/**
1378 * Invalidates a page in the TLBs.
1379 *
1380 * @param pVCpu The cross context virtual CPU structure of the calling
1381 * thread.
1382 * @param GCPtr The address of the page to invalidate
1383 */
1384VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1385{
1386#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1387 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1388 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1389 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1390 uintptr_t idx = (uint8_t)GCPtr;
1391
1392# ifdef IEM_WITH_CODE_TLB
1393 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1394 {
1395 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1396 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1397 pVCpu->iem.s.cbInstrBufTotal = 0;
1398 }
1399# endif
1400
1401# ifdef IEM_WITH_DATA_TLB
1402 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1403 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1404# endif
1405#else
1406 NOREF(pVCpu); NOREF(GCPtr);
1407#endif
1408}
1409
1410
1411/**
1412 * Invalidates the host physical aspects of the IEM TLBs.
1413 *
1414 * This is called internally as well as by PGM when moving GC mappings.
1415 *
1416 * @param pVCpu The cross context virtual CPU structure of the calling
1417 * thread.
1418 */
1419VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1420{
1421#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1422 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1423
1424# ifdef IEM_WITH_CODE_TLB
1425 pVCpu->iem.s.cbInstrBufTotal = 0;
1426# endif
1427 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1428 if (uTlbPhysRev != 0)
1429 {
1430 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1431 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1432 }
1433 else
1434 {
1435 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1436 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1437
1438 unsigned i;
1439# ifdef IEM_WITH_CODE_TLB
1440 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1441 while (i-- > 0)
1442 {
1443 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1444 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1445 }
1446# endif
1447# ifdef IEM_WITH_DATA_TLB
1448 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1449 while (i-- > 0)
1450 {
1451 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1452 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1453 }
1454# endif
1455 }
1456#else
1457 NOREF(pVCpu);
1458#endif
1459}
1460
1461
1462/**
1463 * Invalidates the host physical aspects of the IEM TLBs.
1464 *
1465 * This is called internally as well as by PGM when moving GC mappings.
1466 *
1467 * @param pVM The cross context VM structure.
1468 *
1469 * @remarks Caller holds the PGM lock.
1470 */
1471VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1472{
1473 RT_NOREF_PV(pVM);
1474}
1475
1476#ifdef IEM_WITH_CODE_TLB
1477
1478/**
1479 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1480 * failure and jumping.
1481 *
1482 * We end up here for a number of reasons:
1483 * - pbInstrBuf isn't yet initialized.
1484 * - Advancing beyond the buffer boundary (e.g. cross page).
1485 * - Advancing beyond the CS segment limit.
1486 * - Fetching from non-mappable page (e.g. MMIO).
1487 *
1488 * @param pVCpu The cross context virtual CPU structure of the
1489 * calling thread.
1490 * @param pvDst Where to return the bytes.
1491 * @param cbDst Number of bytes to read.
1492 *
1493 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1494 */
1495IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1496{
1497#ifdef IN_RING3
1498//__debugbreak();
1499 for (;;)
1500 {
1501 Assert(cbDst <= 8);
1502 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1503
1504 /*
1505 * We might have a partial buffer match, deal with that first to make the
1506 * rest simpler. This is the first part of the cross page/buffer case.
1507 */
1508 if (pVCpu->iem.s.pbInstrBuf != NULL)
1509 {
1510 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1511 {
1512 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1513 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1514 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1515
1516 cbDst -= cbCopy;
1517 pvDst = (uint8_t *)pvDst + cbCopy;
1518 offBuf += cbCopy;
1519 pVCpu->iem.s.offInstrNextByte += offBuf;
1520 }
1521 }
1522
1523 /*
1524 * Check segment limit, figuring how much we're allowed to access at this point.
1525 *
1526 * We will fault immediately if RIP is past the segment limit / in non-canonical
1527 * territory. If we do continue, there are one or more bytes to read before we
1528 * end up in trouble, and we need to read those first before faulting.
1529 */
1530 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1531 RTGCPTR GCPtrFirst;
1532 uint32_t cbMaxRead;
1533 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1534 {
1535 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1536 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1537 { /* likely */ }
1538 else
1539 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1540 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1541 }
1542 else
1543 {
1544 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1545 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1546 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1547 { /* likely */ }
1548 else
1549 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1550 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1551 if (cbMaxRead != 0)
1552 { /* likely */ }
1553 else
1554 {
1555 /* Overflowed because address is 0 and limit is max. */
1556 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1557 cbMaxRead = X86_PAGE_SIZE;
1558 }
1559 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1560 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1561 if (cbMaxRead2 < cbMaxRead)
1562 cbMaxRead = cbMaxRead2;
1563 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1564 }
1565
1566 /*
1567 * Get the TLB entry for this piece of code.
1568 */
1569 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1570 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1571 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1572 if (pTlbe->uTag == uTag)
1573 {
1574 /* likely when executing lots of code, otherwise unlikely */
1575# ifdef VBOX_WITH_STATISTICS
1576 pVCpu->iem.s.CodeTlb.cTlbHits++;
1577# endif
1578 }
1579 else
1580 {
1581 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1582# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1583 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1584 {
1585 pTlbe->uTag = uTag;
1586 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1587 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1588 pTlbe->GCPhys = NIL_RTGCPHYS;
1589 pTlbe->pbMappingR3 = NULL;
1590 }
1591 else
1592# endif
1593 {
1594 RTGCPHYS GCPhys;
1595 uint64_t fFlags;
1596 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1597 if (RT_FAILURE(rc))
1598 {
1599 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1600 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1601 }
1602
1603 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1604 pTlbe->uTag = uTag;
1605 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1606 pTlbe->GCPhys = GCPhys;
1607 pTlbe->pbMappingR3 = NULL;
1608 }
1609 }
1610
1611 /*
1612 * Check TLB page table level access flags.
1613 */
1614 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1615 {
1616 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1617 {
1618 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1619 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1620 }
1621 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1622 {
1623 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1624 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1625 }
1626 }
1627
1628# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1629 /*
1630 * Allow interpretation of patch manager code blocks since they can for
1631 * instance throw #PFs for perfectly good reasons.
1632 */
1633 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1634 { /* not unlikely */ }
1635 else
1636 {
1637 /** @todo This could be optimized a little in ring-3 if we liked. */
1638 size_t cbRead = 0;
1639 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1640 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1641 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1642 return;
1643 }
1644# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1645
1646 /*
1647 * Look up the physical page info if necessary.
1648 */
1649 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1650 { /* not necessary */ }
1651 else
1652 {
1653 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1654 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1655 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1656 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1657 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1658 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1659 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1660 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1661 }
1662
1663# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1664 /*
1665 * Try do a direct read using the pbMappingR3 pointer.
1666 */
1667 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1668 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1669 {
1670 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1671 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1672 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1673 {
1674 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1675 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1676 }
1677 else
1678 {
1679 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1680 Assert(cbInstr < cbMaxRead);
1681 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1682 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1683 }
1684 if (cbDst <= cbMaxRead)
1685 {
1686 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1687 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1688 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1689 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1690 return;
1691 }
1692 pVCpu->iem.s.pbInstrBuf = NULL;
1693
1694 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1695 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1696 }
1697 else
1698# endif
1699#if 0
1700 /*
1701 * If there is no special read handling, we can read a bit more and
1702 * put it in the prefetch buffer.
1703 */
1704 if ( cbDst < cbMaxRead
1705 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1706 {
1707 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1708 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1709 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1710 { /* likely */ }
1711 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1712 {
1713 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1714 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1715 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1716 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1717 }
1718 else
1719 {
1720 Log((RT_SUCCESS(rcStrict)
1721 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1722 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1723 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1724 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1725 }
1726 }
1727 /*
1728 * Special read handling, so only read exactly what's needed.
1729 * This is a highly unlikely scenario.
1730 */
1731 else
1732#endif
1733 {
1734 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1735 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1736 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1737 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1738 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1739 { /* likely */ }
1740 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1741 {
1742 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1743 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1744 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1745 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1746 }
1747 else
1748 {
1749 Log((RT_SUCCESS(rcStrict)
1750 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1751 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1752 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1753 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1754 }
1755 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1756 if (cbToRead == cbDst)
1757 return;
1758 }
1759
1760 /*
1761 * More to read, loop.
1762 */
1763 cbDst -= cbMaxRead;
1764 pvDst = (uint8_t *)pvDst + cbMaxRead;
1765 }
1766#else
1767 RT_NOREF(pvDst, cbDst);
1768 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1769#endif
1770}
1771
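/** @par Sketch: code TLB tag and index calculation
 * A minimal sketch of the tag/index scheme used by the function above,
 * assuming the 256-entry table asserted by the AssertCompile; the helper name
 * iemExampleCodeTlbLookup is illustrative only and not part of this file:
 * @code
 *  DECLINLINE(PIEMTLBENTRY) iemExampleCodeTlbLookup(PVMCPU pVCpu, RTGCPTR GCPtr)
 *  {
 *      // The page frame number ORed with the current revision forms the tag...
 *      uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *      // ...and its low 8 bits pick one of the 256 direct-mapped entries.
 *      return &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
 *  }
 * @endcode
 * A hit requires pTlbe->uTag == uTag, so bumping uTlbRevision invalidates all
 * entries at once without touching the array.
 */
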
1772#else
1773
1774/**
1775 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1776 * exception if it fails.
1777 *
1778 * @returns Strict VBox status code.
1779 * @param pVCpu The cross context virtual CPU structure of the
1780 * calling thread.
1781 * @param cbMin The minimum number of bytes relative to offOpcode
1782 * that must be read.
1783 */
1784IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1785{
1786 /*
1787 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1788 *
1789 * First translate CS:rIP to a physical address.
1790 */
1791 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1792 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1793 uint32_t cbToTryRead;
1794 RTGCPTR GCPtrNext;
1795 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1796 {
1797 cbToTryRead = PAGE_SIZE;
1798 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1799 if (!IEM_IS_CANONICAL(GCPtrNext))
1800 return iemRaiseGeneralProtectionFault0(pVCpu);
1801 }
1802 else
1803 {
1804 uint32_t GCPtrNext32 = pCtx->eip;
1805 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1806 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1807 if (GCPtrNext32 > pCtx->cs.u32Limit)
1808 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1809 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1810 if (!cbToTryRead) /* overflowed */
1811 {
1812 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1813 cbToTryRead = UINT32_MAX;
1814 /** @todo check out wrapping around the code segment. */
1815 }
1816 if (cbToTryRead < cbMin - cbLeft)
1817 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1818 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1819 }
1820
1821 /* Only read up to the end of the page, and make sure we don't read more
1822 than the opcode buffer can hold. */
1823 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1824 if (cbToTryRead > cbLeftOnPage)
1825 cbToTryRead = cbLeftOnPage;
1826 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1827 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1828/** @todo r=bird: Convert assertion into undefined opcode exception? */
1829 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1830
1831# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1832 /* Allow interpretation of patch manager code blocks since they can for
1833 instance throw #PFs for perfectly good reasons. */
1834 if (pVCpu->iem.s.fInPatchCode)
1835 {
1836 size_t cbRead = 0;
1837 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1838 AssertRCReturn(rc, rc);
1839 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1840 return VINF_SUCCESS;
1841 }
1842# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1843
1844 RTGCPHYS GCPhys;
1845 uint64_t fFlags;
1846 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1847 if (RT_FAILURE(rc))
1848 {
1849 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1850 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1851 }
1852 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1853 {
1854 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1855 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1856 }
1857 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1858 {
1859 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1860 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1861 }
1862 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1863 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1864 /** @todo Check reserved bits and such stuff. PGM is better at doing
1865 * that, so do it when implementing the guest virtual address
1866 * TLB... */
1867
1868 /*
1869 * Read the bytes at this address.
1870 *
1871 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1872 * and since PATM should only patch the start of an instruction there
1873 * should be no need to check again here.
1874 */
1875 if (!pVCpu->iem.s.fBypassHandlers)
1876 {
1877 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1878 cbToTryRead, PGMACCESSORIGIN_IEM);
1879 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1880 { /* likely */ }
1881 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1882 {
1883 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1884 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1885 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1886 }
1887 else
1888 {
1889 Log((RT_SUCCESS(rcStrict)
1890 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1891 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1892 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1893 return rcStrict;
1894 }
1895 }
1896 else
1897 {
1898 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1899 if (RT_SUCCESS(rc))
1900 { /* likely */ }
1901 else
1902 {
1903 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1904 return rc;
1905 }
1906 }
1907 pVCpu->iem.s.cbOpcode += cbToTryRead;
1908 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1909
1910 return VINF_SUCCESS;
1911}
1912
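/** @par Sketch: clamping the prefetch size
 * An equivalent restatement of the cbToTryRead clamping above, using RT_MIN,
 * with illustrative numbers only: with GCPtrNext = 0x1ff8 the current page
 * leaves PAGE_SIZE - (0x1ff8 & PAGE_OFFSET_MASK) = 8 bytes, and with
 * cbOpcode = 13 the opcode buffer leaves sizeof(abOpcode) - 13 bytes;
 * cbToTryRead ends up as the smaller of these and the segment-limit value:
 * @code
 *  uint32_t const cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
 *  cbToTryRead = RT_MIN(cbToTryRead, cbLeftOnPage);
 *  cbToTryRead = RT_MIN(cbToTryRead, (uint32_t)sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode);
 * @endcode
 */
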
1913#endif /* !IEM_WITH_CODE_TLB */
1914#ifndef IEM_WITH_SETJMP
1915
1916/**
1917 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1918 *
1919 * @returns Strict VBox status code.
1920 * @param pVCpu The cross context virtual CPU structure of the
1921 * calling thread.
1922 * @param pb Where to return the opcode byte.
1923 */
1924DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1925{
1926 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1927 if (rcStrict == VINF_SUCCESS)
1928 {
1929 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1930 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1931 pVCpu->iem.s.offOpcode = offOpcode + 1;
1932 }
1933 else
1934 *pb = 0;
1935 return rcStrict;
1936}
1937
1938
1939/**
1940 * Fetches the next opcode byte.
1941 *
1942 * @returns Strict VBox status code.
1943 * @param pVCpu The cross context virtual CPU structure of the
1944 * calling thread.
1945 * @param pu8 Where to return the opcode byte.
1946 */
1947DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1948{
1949 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1950 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1951 {
1952 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1953 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1954 return VINF_SUCCESS;
1955 }
1956 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1957}
1958
1959#else /* IEM_WITH_SETJMP */
1960
1961/**
1962 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1963 *
1964 * @returns The opcode byte.
1965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1966 */
1967DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1968{
1969# ifdef IEM_WITH_CODE_TLB
1970 uint8_t u8;
1971 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1972 return u8;
1973# else
1974 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1975 if (rcStrict == VINF_SUCCESS)
1976 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1977 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1978# endif
1979}
1980
1981
1982/**
1983 * Fetches the next opcode byte, longjmp on error.
1984 *
1985 * @returns The opcode byte.
1986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1987 */
1988DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1989{
1990# ifdef IEM_WITH_CODE_TLB
1991 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1992 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1993 if (RT_LIKELY( pbBuf != NULL
1994 && offBuf < pVCpu->iem.s.cbInstrBuf))
1995 {
1996 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1997 return pbBuf[offBuf];
1998 }
1999# else
2000 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2001 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2002 {
2003 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2004 return pVCpu->iem.s.abOpcode[offOpcode];
2005 }
2006# endif
2007 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2008}
2009
2010#endif /* IEM_WITH_SETJMP */
2011
2012/**
2013 * Fetches the next opcode byte, returns automatically on failure.
2014 *
2015 * @param a_pu8 Where to return the opcode byte.
2016 * @remark Implicitly references pVCpu.
2017 */
2018#ifndef IEM_WITH_SETJMP
2019# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2020 do \
2021 { \
2022 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2023 if (rcStrict2 == VINF_SUCCESS) \
2024 { /* likely */ } \
2025 else \
2026 return rcStrict2; \
2027 } while (0)
2028#else
2029# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2030#endif /* IEM_WITH_SETJMP */
2031
2032
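/** @par Sketch: using IEM_OPCODE_GET_NEXT_U8 in a decoder helper
 * A minimal sketch of how the macro is meant to be used; the function name
 * iemExampleDecodeModRmByte is hypothetical and not part of this file:
 * @code
 *  IEM_STATIC VBOXSTRICTRC iemExampleDecodeModRmByte(PVMCPU pVCpu, uint8_t *pbRm)
 *  {
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);   // returns rcStrict2 (or longjmps) on fetch failure
 *      *pbRm = bRm;
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 * Note that the macro references pVCpu implicitly, so the enclosing function
 * must have it in scope.
 */
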
2033#ifndef IEM_WITH_SETJMP
2034/**
2035 * Fetches the next signed byte from the opcode stream.
2036 *
2037 * @returns Strict VBox status code.
2038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2039 * @param pi8 Where to return the signed byte.
2040 */
2041DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2042{
2043 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2044}
2045#endif /* !IEM_WITH_SETJMP */
2046
2047
2048/**
2049 * Fetches the next signed byte from the opcode stream, returning automatically
2050 * on failure.
2051 *
2052 * @param a_pi8 Where to return the signed byte.
2053 * @remark Implicitly references pVCpu.
2054 */
2055#ifndef IEM_WITH_SETJMP
2056# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2057 do \
2058 { \
2059 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2060 if (rcStrict2 != VINF_SUCCESS) \
2061 return rcStrict2; \
2062 } while (0)
2063#else /* IEM_WITH_SETJMP */
2064# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2065
2066#endif /* IEM_WITH_SETJMP */
2067
2068#ifndef IEM_WITH_SETJMP
2069
2070/**
2071 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2072 *
2073 * @returns Strict VBox status code.
2074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2075 * @param pu16 Where to return the opcode word.
2076 */
2077DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2078{
2079 uint8_t u8;
2080 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2081 if (rcStrict == VINF_SUCCESS)
2082 *pu16 = (int8_t)u8;
2083 return rcStrict;
2084}
2085
2086
2087/**
2088 * Fetches the next signed byte from the opcode stream, extending it to
2089 * unsigned 16-bit.
2090 *
2091 * @returns Strict VBox status code.
2092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2093 * @param pu16 Where to return the unsigned word.
2094 */
2095DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2096{
2097 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2098 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2099 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2100
2101 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2102 pVCpu->iem.s.offOpcode = offOpcode + 1;
2103 return VINF_SUCCESS;
2104}
2105
2106#endif /* !IEM_WITH_SETJMP */
2107
2108/**
2109 * Fetches the next signed byte from the opcode stream, sign-extending it to
2110 * a word, returning automatically on failure.
2111 *
2112 * @param a_pu16 Where to return the word.
2113 * @remark Implicitly references pVCpu.
2114 */
2115#ifndef IEM_WITH_SETJMP
2116# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2117 do \
2118 { \
2119 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2120 if (rcStrict2 != VINF_SUCCESS) \
2121 return rcStrict2; \
2122 } while (0)
2123#else
2124# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2125#endif
2126
2127#ifndef IEM_WITH_SETJMP
2128
2129/**
2130 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2131 *
2132 * @returns Strict VBox status code.
2133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2134 * @param pu32 Where to return the opcode dword.
2135 */
2136DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2137{
2138 uint8_t u8;
2139 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2140 if (rcStrict == VINF_SUCCESS)
2141 *pu32 = (int8_t)u8;
2142 return rcStrict;
2143}
2144
2145
2146/**
2147 * Fetches the next signed byte from the opcode stream, extending it to
2148 * unsigned 32-bit.
2149 *
2150 * @returns Strict VBox status code.
2151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2152 * @param pu32 Where to return the unsigned dword.
2153 */
2154DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2155{
2156 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2157 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2158 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2159
2160 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2161 pVCpu->iem.s.offOpcode = offOpcode + 1;
2162 return VINF_SUCCESS;
2163}
2164
2165#endif /* !IEM_WITH_SETJMP */
2166
2167/**
2168 * Fetches the next signed byte from the opcode stream, sign-extending it to
2169 * a double word, returning automatically on failure.
2170 *
2171 * @param a_pu32 Where to return the double word.
2172 * @remark Implicitly references pVCpu.
2173 */
2174#ifndef IEM_WITH_SETJMP
2175#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2176 do \
2177 { \
2178 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2179 if (rcStrict2 != VINF_SUCCESS) \
2180 return rcStrict2; \
2181 } while (0)
2182#else
2183# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2184#endif
2185
2186#ifndef IEM_WITH_SETJMP
2187
2188/**
2189 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2190 *
2191 * @returns Strict VBox status code.
2192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2193 * @param pu64 Where to return the opcode qword.
2194 */
2195DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2196{
2197 uint8_t u8;
2198 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2199 if (rcStrict == VINF_SUCCESS)
2200 *pu64 = (int8_t)u8;
2201 return rcStrict;
2202}
2203
2204
2205/**
2206 * Fetches the next signed byte from the opcode stream, extending it to
2207 * unsigned 64-bit.
2208 *
2209 * @returns Strict VBox status code.
2210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2211 * @param pu64 Where to return the unsigned qword.
2212 */
2213DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2214{
2215 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2216 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2217 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2218
2219 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2220 pVCpu->iem.s.offOpcode = offOpcode + 1;
2221 return VINF_SUCCESS;
2222}
2223
2224#endif /* !IEM_WITH_SETJMP */
2225
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream, sign-extending it to
2229 * a quad word, returning automatically on failure.
2230 *
2231 * @param a_pu64 Where to return the quad word.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else
2243# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244#endif
2245
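/** @par Sketch: S8 sign extension values
 * A small worked example of what the S8_SX_* fetchers above produce; the byte
 * value is illustrative only:
 * @code
 *  uint8_t const b = 0x80;                 // -128 as a signed byte
 *  uint16_t u16 = (uint16_t)(int8_t)b;     // 0xff80
 *  uint32_t u32 = (uint32_t)(int8_t)b;     // 0xffffff80
 *  uint64_t u64 = (uint64_t)(int8_t)b;     // 0xffffffffffffff80
 * @endcode
 * This matches the (int8_t) casts used by both the slow and the inline paths.
 */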
2246
2247#ifndef IEM_WITH_SETJMP
2248
2249/**
2250 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2251 *
2252 * @returns Strict VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param pu16 Where to return the opcode word.
2255 */
2256DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2257{
2258 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2259 if (rcStrict == VINF_SUCCESS)
2260 {
2261 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2262# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2263 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2264# else
2265 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2266# endif
2267 pVCpu->iem.s.offOpcode = offOpcode + 2;
2268 }
2269 else
2270 *pu16 = 0;
2271 return rcStrict;
2272}
2273
2274
2275/**
2276 * Fetches the next opcode word.
2277 *
2278 * @returns Strict VBox status code.
2279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2280 * @param pu16 Where to return the opcode word.
2281 */
2282DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2283{
2284 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2285 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2286 {
2287 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2288# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2289 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2290# else
2291 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2292# endif
2293 return VINF_SUCCESS;
2294 }
2295 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2296}
2297
2298#else /* IEM_WITH_SETJMP */
2299
2300/**
2301 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2302 *
2303 * @returns The opcode word.
2304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2305 */
2306DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2307{
2308# ifdef IEM_WITH_CODE_TLB
2309 uint16_t u16;
2310 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2311 return u16;
2312# else
2313 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2314 if (rcStrict == VINF_SUCCESS)
2315 {
2316 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2317 pVCpu->iem.s.offOpcode += 2;
2318# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2319 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2320# else
2321 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2322# endif
2323 }
2324 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2325# endif
2326}
2327
2328
2329/**
2330 * Fetches the next opcode word, longjmp on error.
2331 *
2332 * @returns The opcode word.
2333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2334 */
2335DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2336{
2337# ifdef IEM_WITH_CODE_TLB
2338 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2339 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2340 if (RT_LIKELY( pbBuf != NULL
2341 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2342 {
2343 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2344# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2345 return *(uint16_t const *)&pbBuf[offBuf];
2346# else
2347 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2348# endif
2349 }
2350# else
2351 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2352 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2353 {
2354 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2355# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2356 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2357# else
2358 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2359# endif
2360 }
2361# endif
2362 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2363}
2364
2365#endif /* IEM_WITH_SETJMP */
2366
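/** @par Note on the two fetch paths
 * A sketch of why the IEM_USE_UNALIGNED_DATA_ACCESS and RT_MAKE_U16/U32/U64
 * branches above are equivalent: opcode bytes are stored in guest (little
 * endian) order, so composing them explicitly yields the same value as an
 * unaligned host load on little-endian hosts:
 * @code
 *  uint8_t const ab[2] = { 0x34, 0x12 };
 *  uint16_t u16A = RT_MAKE_U16(ab[0], ab[1]);      // 0x1234
 *  uint16_t u16B = *(uint16_t const *)&ab[0];      // 0x1234 on little-endian hosts
 *  Assert(u16A == u16B);
 * @endcode
 * The RT_MAKE_* path is the portable fallback wrt. alignment and endianness.
 */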
2367
2368/**
2369 * Fetches the next opcode word, returns automatically on failure.
2370 *
2371 * @param a_pu16 Where to return the opcode word.
2372 * @remark Implicitly references pVCpu.
2373 */
2374#ifndef IEM_WITH_SETJMP
2375# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2376 do \
2377 { \
2378 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2379 if (rcStrict2 != VINF_SUCCESS) \
2380 return rcStrict2; \
2381 } while (0)
2382#else
2383# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2384#endif
2385
2386#ifndef IEM_WITH_SETJMP
2387
2388/**
2389 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2390 *
2391 * @returns Strict VBox status code.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 * @param pu32 Where to return the opcode double word.
2394 */
2395DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2396{
2397 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2398 if (rcStrict == VINF_SUCCESS)
2399 {
2400 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2401 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2402 pVCpu->iem.s.offOpcode = offOpcode + 2;
2403 }
2404 else
2405 *pu32 = 0;
2406 return rcStrict;
2407}
2408
2409
2410/**
2411 * Fetches the next opcode word, zero extending it to a double word.
2412 *
2413 * @returns Strict VBox status code.
2414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2415 * @param pu32 Where to return the opcode double word.
2416 */
2417DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2418{
2419 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2420 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2421 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2422
2423 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2424 pVCpu->iem.s.offOpcode = offOpcode + 2;
2425 return VINF_SUCCESS;
2426}
2427
2428#endif /* !IEM_WITH_SETJMP */
2429
2430
2431/**
2432 * Fetches the next opcode word and zero extends it to a double word, returns
2433 * automatically on failure.
2434 *
2435 * @param a_pu32 Where to return the opcode double word.
2436 * @remark Implicitly references pVCpu.
2437 */
2438#ifndef IEM_WITH_SETJMP
2439# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2440 do \
2441 { \
2442 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2443 if (rcStrict2 != VINF_SUCCESS) \
2444 return rcStrict2; \
2445 } while (0)
2446#else
2447# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2448#endif
2449
2450#ifndef IEM_WITH_SETJMP
2451
2452/**
2453 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2454 *
2455 * @returns Strict VBox status code.
2456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2457 * @param pu64 Where to return the opcode quad word.
2458 */
2459DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2460{
2461 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2462 if (rcStrict == VINF_SUCCESS)
2463 {
2464 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2465 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2466 pVCpu->iem.s.offOpcode = offOpcode + 2;
2467 }
2468 else
2469 *pu64 = 0;
2470 return rcStrict;
2471}
2472
2473
2474/**
2475 * Fetches the next opcode word, zero extending it to a quad word.
2476 *
2477 * @returns Strict VBox status code.
2478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2479 * @param pu64 Where to return the opcode quad word.
2480 */
2481DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2482{
2483 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2484 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2485 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2486
2487 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488 pVCpu->iem.s.offOpcode = offOpcode + 2;
2489 return VINF_SUCCESS;
2490}
2491
2492#endif /* !IEM_WITH_SETJMP */
2493
2494/**
2495 * Fetches the next opcode word and zero extends it to a quad word, returns
2496 * automatically on failure.
2497 *
2498 * @param a_pu64 Where to return the opcode quad word.
2499 * @remark Implicitly references pVCpu.
2500 */
2501#ifndef IEM_WITH_SETJMP
2502# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2503 do \
2504 { \
2505 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2506 if (rcStrict2 != VINF_SUCCESS) \
2507 return rcStrict2; \
2508 } while (0)
2509#else
2510# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2511#endif
2512
2513
2514#ifndef IEM_WITH_SETJMP
2515/**
2516 * Fetches the next signed word from the opcode stream.
2517 *
2518 * @returns Strict VBox status code.
2519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2520 * @param pi16 Where to return the signed word.
2521 */
2522DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2523{
2524 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2525}
2526#endif /* !IEM_WITH_SETJMP */
2527
2528
2529/**
2530 * Fetches the next signed word from the opcode stream, returning automatically
2531 * on failure.
2532 *
2533 * @param a_pi16 Where to return the signed word.
2534 * @remark Implicitly references pVCpu.
2535 */
2536#ifndef IEM_WITH_SETJMP
2537# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2538 do \
2539 { \
2540 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2541 if (rcStrict2 != VINF_SUCCESS) \
2542 return rcStrict2; \
2543 } while (0)
2544#else
2545# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2546#endif
2547
2548#ifndef IEM_WITH_SETJMP
2549
2550/**
2551 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2552 *
2553 * @returns Strict VBox status code.
2554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2555 * @param pu32 Where to return the opcode dword.
2556 */
2557DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2558{
2559 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2560 if (rcStrict == VINF_SUCCESS)
2561 {
2562 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2563# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2564 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2565# else
2566 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2567 pVCpu->iem.s.abOpcode[offOpcode + 1],
2568 pVCpu->iem.s.abOpcode[offOpcode + 2],
2569 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2570# endif
2571 pVCpu->iem.s.offOpcode = offOpcode + 4;
2572 }
2573 else
2574 *pu32 = 0;
2575 return rcStrict;
2576}
2577
2578
2579/**
2580 * Fetches the next opcode dword.
2581 *
2582 * @returns Strict VBox status code.
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param pu32 Where to return the opcode double word.
2585 */
2586DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2587{
2588 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2589 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2590 {
2591 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2592# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2593 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2594# else
2595 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2596 pVCpu->iem.s.abOpcode[offOpcode + 1],
2597 pVCpu->iem.s.abOpcode[offOpcode + 2],
2598 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2599# endif
2600 return VINF_SUCCESS;
2601 }
2602 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2603}
2604
2605#else /* IEM_WITH_SETJMP */
2606
2607/**
2608 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2609 *
2610 * @returns The opcode dword.
2611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2612 */
2613DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2614{
2615# ifdef IEM_WITH_CODE_TLB
2616 uint32_t u32;
2617 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2618 return u32;
2619# else
2620 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2621 if (rcStrict == VINF_SUCCESS)
2622 {
2623 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2624 pVCpu->iem.s.offOpcode = offOpcode + 4;
2625# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2626 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2627# else
2628 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2629 pVCpu->iem.s.abOpcode[offOpcode + 1],
2630 pVCpu->iem.s.abOpcode[offOpcode + 2],
2631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2632# endif
2633 }
2634 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2635# endif
2636}
2637
2638
2639/**
2640 * Fetches the next opcode dword, longjmp on error.
2641 *
2642 * @returns The opcode dword.
2643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2644 */
2645DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2646{
2647# ifdef IEM_WITH_CODE_TLB
2648 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2649 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2650 if (RT_LIKELY( pbBuf != NULL
2651 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2652 {
2653 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2654# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2655 return *(uint32_t const *)&pbBuf[offBuf];
2656# else
2657 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2658 pbBuf[offBuf + 1],
2659 pbBuf[offBuf + 2],
2660 pbBuf[offBuf + 3]);
2661# endif
2662 }
2663# else
2664 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2665 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2666 {
2667 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2668# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2669 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2670# else
2671 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2672 pVCpu->iem.s.abOpcode[offOpcode + 1],
2673 pVCpu->iem.s.abOpcode[offOpcode + 2],
2674 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2675# endif
2676 }
2677# endif
2678 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2679}
2680
2681#endif /* IEM_WITH_SETJMP */
2682
2683
2684/**
2685 * Fetches the next opcode dword, returns automatically on failure.
2686 *
2687 * @param a_pu32 Where to return the opcode dword.
2688 * @remark Implicitly references pVCpu.
2689 */
2690#ifndef IEM_WITH_SETJMP
2691# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2692 do \
2693 { \
2694 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2695 if (rcStrict2 != VINF_SUCCESS) \
2696 return rcStrict2; \
2697 } while (0)
2698#else
2699# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2700#endif
2701
2702#ifndef IEM_WITH_SETJMP
2703
2704/**
2705 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2706 *
2707 * @returns Strict VBox status code.
2708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2709 * @param pu64 Where to return the opcode quad word.
2710 */
2711DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2712{
2713 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2714 if (rcStrict == VINF_SUCCESS)
2715 {
2716 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2717 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2718 pVCpu->iem.s.abOpcode[offOpcode + 1],
2719 pVCpu->iem.s.abOpcode[offOpcode + 2],
2720 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2721 pVCpu->iem.s.offOpcode = offOpcode + 4;
2722 }
2723 else
2724 *pu64 = 0;
2725 return rcStrict;
2726}
2727
2728
2729/**
2730 * Fetches the next opcode dword, zero extending it to a quad word.
2731 *
2732 * @returns Strict VBox status code.
2733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2734 * @param pu64 Where to return the opcode quad word.
2735 */
2736DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2737{
2738 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2739 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2740 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2741
2742 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2743 pVCpu->iem.s.abOpcode[offOpcode + 1],
2744 pVCpu->iem.s.abOpcode[offOpcode + 2],
2745 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2746 pVCpu->iem.s.offOpcode = offOpcode + 4;
2747 return VINF_SUCCESS;
2748}
2749
2750#endif /* !IEM_WITH_SETJMP */
2751
2752
2753/**
2754 * Fetches the next opcode dword and zero extends it to a quad word, returns
2755 * automatically on failure.
2756 *
2757 * @param a_pu64 Where to return the opcode quad word.
2758 * @remark Implicitly references pVCpu.
2759 */
2760#ifndef IEM_WITH_SETJMP
2761# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2762 do \
2763 { \
2764 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2765 if (rcStrict2 != VINF_SUCCESS) \
2766 return rcStrict2; \
2767 } while (0)
2768#else
2769# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2770#endif
2771
2772
2773#ifndef IEM_WITH_SETJMP
2774/**
2775 * Fetches the next signed double word from the opcode stream.
2776 *
2777 * @returns Strict VBox status code.
2778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2779 * @param pi32 Where to return the signed double word.
2780 */
2781DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2782{
2783 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2784}
2785#endif
2786
2787/**
2788 * Fetches the next signed double word from the opcode stream, returning
2789 * automatically on failure.
2790 *
2791 * @param a_pi32 Where to return the signed double word.
2792 * @remark Implicitly references pVCpu.
2793 */
2794#ifndef IEM_WITH_SETJMP
2795# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2796 do \
2797 { \
2798 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2799 if (rcStrict2 != VINF_SUCCESS) \
2800 return rcStrict2; \
2801 } while (0)
2802#else
2803# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2804#endif
2805
2806#ifndef IEM_WITH_SETJMP
2807
2808/**
2809 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2810 *
2811 * @returns Strict VBox status code.
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 * @param pu64 Where to return the opcode qword.
2814 */
2815DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2816{
2817 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2818 if (rcStrict == VINF_SUCCESS)
2819 {
2820 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2821 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2822 pVCpu->iem.s.abOpcode[offOpcode + 1],
2823 pVCpu->iem.s.abOpcode[offOpcode + 2],
2824 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2825 pVCpu->iem.s.offOpcode = offOpcode + 4;
2826 }
2827 else
2828 *pu64 = 0;
2829 return rcStrict;
2830}
2831
2832
2833/**
2834 * Fetches the next opcode dword, sign extending it into a quad word.
2835 *
2836 * @returns Strict VBox status code.
2837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2838 * @param pu64 Where to return the opcode quad word.
2839 */
2840DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2841{
2842 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2843 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2844 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2845
2846 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2847 pVCpu->iem.s.abOpcode[offOpcode + 1],
2848 pVCpu->iem.s.abOpcode[offOpcode + 2],
2849 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2850 *pu64 = i32;
2851 pVCpu->iem.s.offOpcode = offOpcode + 4;
2852 return VINF_SUCCESS;
2853}
2854
2855#endif /* !IEM_WITH_SETJMP */
2856
2857
2858/**
2859 * Fetches the next opcode double word and sign extends it to a quad word,
2860 * returns automatically on failure.
2861 *
2862 * @param a_pu64 Where to return the opcode quad word.
2863 * @remark Implicitly references pVCpu.
2864 */
2865#ifndef IEM_WITH_SETJMP
2866# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2867 do \
2868 { \
2869 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2870 if (rcStrict2 != VINF_SUCCESS) \
2871 return rcStrict2; \
2872 } while (0)
2873#else
2874# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2875#endif
2876
2877#ifndef IEM_WITH_SETJMP
2878
2879/**
2880 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2881 *
2882 * @returns Strict VBox status code.
2883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2884 * @param pu64 Where to return the opcode qword.
2885 */
2886DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2887{
2888 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2889 if (rcStrict == VINF_SUCCESS)
2890 {
2891 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2892# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2893 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2894# else
2895 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2896 pVCpu->iem.s.abOpcode[offOpcode + 1],
2897 pVCpu->iem.s.abOpcode[offOpcode + 2],
2898 pVCpu->iem.s.abOpcode[offOpcode + 3],
2899 pVCpu->iem.s.abOpcode[offOpcode + 4],
2900 pVCpu->iem.s.abOpcode[offOpcode + 5],
2901 pVCpu->iem.s.abOpcode[offOpcode + 6],
2902 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2903# endif
2904 pVCpu->iem.s.offOpcode = offOpcode + 8;
2905 }
2906 else
2907 *pu64 = 0;
2908 return rcStrict;
2909}
2910
2911
2912/**
2913 * Fetches the next opcode qword.
2914 *
2915 * @returns Strict VBox status code.
2916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2917 * @param pu64 Where to return the opcode qword.
2918 */
2919DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2920{
2921 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2922 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2923 {
2924# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2925 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2926# else
2927 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2928 pVCpu->iem.s.abOpcode[offOpcode + 1],
2929 pVCpu->iem.s.abOpcode[offOpcode + 2],
2930 pVCpu->iem.s.abOpcode[offOpcode + 3],
2931 pVCpu->iem.s.abOpcode[offOpcode + 4],
2932 pVCpu->iem.s.abOpcode[offOpcode + 5],
2933 pVCpu->iem.s.abOpcode[offOpcode + 6],
2934 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2935# endif
2936 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2937 return VINF_SUCCESS;
2938 }
2939 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2940}
2941
2942#else /* IEM_WITH_SETJMP */
2943
2944/**
2945 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2946 *
2947 * @returns The opcode qword.
2948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2949 */
2950DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2951{
2952# ifdef IEM_WITH_CODE_TLB
2953 uint64_t u64;
2954 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2955 return u64;
2956# else
2957 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2958 if (rcStrict == VINF_SUCCESS)
2959 {
2960 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2961 pVCpu->iem.s.offOpcode = offOpcode + 8;
2962# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2963 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2964# else
2965 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2966 pVCpu->iem.s.abOpcode[offOpcode + 1],
2967 pVCpu->iem.s.abOpcode[offOpcode + 2],
2968 pVCpu->iem.s.abOpcode[offOpcode + 3],
2969 pVCpu->iem.s.abOpcode[offOpcode + 4],
2970 pVCpu->iem.s.abOpcode[offOpcode + 5],
2971 pVCpu->iem.s.abOpcode[offOpcode + 6],
2972 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2973# endif
2974 }
2975 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2976# endif
2977}
2978
2979
2980/**
2981 * Fetches the next opcode qword, longjmp on error.
2982 *
2983 * @returns The opcode qword.
2984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2985 */
2986DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2987{
2988# ifdef IEM_WITH_CODE_TLB
2989 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2990 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2991 if (RT_LIKELY( pbBuf != NULL
2992 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2993 {
2994 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2995# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2996 return *(uint64_t const *)&pbBuf[offBuf];
2997# else
2998 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2999 pbBuf[offBuf + 1],
3000 pbBuf[offBuf + 2],
3001 pbBuf[offBuf + 3],
3002 pbBuf[offBuf + 4],
3003 pbBuf[offBuf + 5],
3004 pbBuf[offBuf + 6],
3005 pbBuf[offBuf + 7]);
3006# endif
3007 }
3008# else
3009 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3010 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3011 {
3012 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3013# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3014 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3015# else
3016 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3017 pVCpu->iem.s.abOpcode[offOpcode + 1],
3018 pVCpu->iem.s.abOpcode[offOpcode + 2],
3019 pVCpu->iem.s.abOpcode[offOpcode + 3],
3020 pVCpu->iem.s.abOpcode[offOpcode + 4],
3021 pVCpu->iem.s.abOpcode[offOpcode + 5],
3022 pVCpu->iem.s.abOpcode[offOpcode + 6],
3023 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3024# endif
3025 }
3026# endif
3027 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3028}
3029
3030#endif /* IEM_WITH_SETJMP */
3031
3032/**
3033 * Fetches the next opcode quad word, returns automatically on failure.
3034 *
3035 * @param a_pu64 Where to return the opcode quad word.
3036 * @remark Implicitly references pVCpu.
3037 */
3038#ifndef IEM_WITH_SETJMP
3039# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3040 do \
3041 { \
3042 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3043 if (rcStrict2 != VINF_SUCCESS) \
3044 return rcStrict2; \
3045 } while (0)
3046#else
3047# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3048#endif
3049
3050
3051/** @name Misc Worker Functions.
3052 * @{
3053 */
3054
3055
3056/**
3057 * Validates a new SS segment.
3058 *
3059 * @returns VBox strict status code.
3060 * @param pVCpu The cross context virtual CPU structure of the
3061 * calling thread.
3062 * @param pCtx The CPU context.
3063 * @param NewSS The new SS selector.
3064 * @param uCpl The CPL to load the stack for.
3065 * @param pDesc Where to return the descriptor.
3066 */
3067IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3068{
3069 NOREF(pCtx);
3070
3071 /* Null selectors are not allowed (we're not called for dispatching
3072 interrupts with SS=0 in long mode). */
3073 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3074 {
3075 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3076 return iemRaiseTaskSwitchFault0(pVCpu);
3077 }
3078
3079 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3080 if ((NewSS & X86_SEL_RPL) != uCpl)
3081 {
3082 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3083 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3084 }
3085
3086 /*
3087 * Read the descriptor.
3088 */
3089 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3090 if (rcStrict != VINF_SUCCESS)
3091 return rcStrict;
3092
3093 /*
3094 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3095 */
3096 if (!pDesc->Legacy.Gen.u1DescType)
3097 {
3098 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3099 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3100 }
3101
3102 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3103 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3104 {
3105 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3106 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3107 }
3108 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3109 {
3110 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3111 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3112 }
3113
3114 /* Is it there? */
3115 /** @todo testcase: Is this checked before the canonical / limit check below? */
3116 if (!pDesc->Legacy.Gen.u1Present)
3117 {
3118 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3119 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3120 }
3121
3122 return VINF_SUCCESS;
3123}
3124
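/** @par Sketch: typical caller pattern
 * A hedged sketch of how a stack-switching caller would use the validator
 * above before loading SS; the surrounding variables (NewSS, uNewCpl, pCtx)
 * are assumed to be in the caller's scope and are illustrative only:
 * @code
 *  IEMSELDESC DescSS;
 *  VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
 *  if (rcStrict != VINF_SUCCESS)
 *      return rcStrict;        // #TS/#NP status already produced by the validator.
 *  // DescSS.Legacy now holds the validated, present, writable data segment.
 * @endcode
 */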
3125
3126/**
3127 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3128 * not.
3129 *
3130 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3131 * @param a_pCtx The CPU context.
3132 */
3133#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3134# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3135 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3136 ? (a_pCtx)->eflags.u \
3137 : CPUMRawGetEFlags(a_pVCpu) )
3138#else
3139# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3140 ( (a_pCtx)->eflags.u )
3141#endif
3142
3143/**
3144 * Updates the EFLAGS in the correct manner wrt. PATM.
3145 *
3146 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3147 * @param a_pCtx The CPU context.
3148 * @param a_fEfl The new EFLAGS.
3149 */
3150#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3151# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3152 do { \
3153 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3154 (a_pCtx)->eflags.u = (a_fEfl); \
3155 else \
3156 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3157 } while (0)
3158#else
3159# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3160 do { \
3161 (a_pCtx)->eflags.u = (a_fEfl); \
3162 } while (0)
3163#endif
3164
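/** @par Sketch: read-modify-write of EFLAGS via the PATM-aware accessors
 * A minimal sketch, not taken from a specific instruction implementation, of
 * how these two macros are meant to be paired so raw-mode PATM state stays
 * consistent:
 * @code
 *  uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *  fEfl |= X86_EFL_CF;                     // example modification only
 *  IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 * @endcode
 */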
3165
3166/** @} */
3167
3168/** @name Raising Exceptions.
3169 *
3170 * @{
3171 */
3172
3173/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3174 * @{ */
3175/** CPU exception. */
3176#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3177/** External interrupt (from PIC, APIC, whatever). */
3178#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3179/** Software interrupt (int or into, not bound).
3180 * Returns to the following instruction */
3181#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3182/** Takes an error code. */
3183#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3184/** Takes a CR2. */
3185#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3186/** Generated by the breakpoint instruction. */
3187#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3188/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3189#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3190/** @} */
3191
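/*
 * Illustrative combinations (assuming the usual x86 exception semantics, not
 * an exhaustive list):
 *      - #DE, #UD:              IEM_XCPT_FLAGS_T_CPU_XCPT
 *      - #GP, #TS, #NP, #SS:    IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR
 *      - #PF:                   IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 *      - INT n:                 IEM_XCPT_FLAGS_T_SOFT_INT
 *      - INT3:                  IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR
 * The various iemRaiseXxx helpers combine the flags along these lines.
 */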
3192
3193/**
3194 * Loads the specified stack far pointer from the TSS.
3195 *
3196 * @returns VBox strict status code.
3197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3198 * @param pCtx The CPU context.
3199 * @param uCpl The CPL to load the stack for.
3200 * @param pSelSS Where to return the new stack segment.
3201 * @param puEsp Where to return the new stack pointer.
3202 */
3203IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3204 PRTSEL pSelSS, uint32_t *puEsp)
3205{
3206 VBOXSTRICTRC rcStrict;
3207 Assert(uCpl < 4);
3208
3209 switch (pCtx->tr.Attr.n.u4Type)
3210 {
3211 /*
3212 * 16-bit TSS (X86TSS16).
3213 */
3214 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3215 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3216 {
3217 uint32_t off = uCpl * 4 + 2;
3218 if (off + 4 <= pCtx->tr.u32Limit)
3219 {
3220 /** @todo check actual access pattern here. */
3221 uint32_t u32Tmp = 0; /* gcc maybe... */
3222 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3223 if (rcStrict == VINF_SUCCESS)
3224 {
3225 *puEsp = RT_LOWORD(u32Tmp);
3226 *pSelSS = RT_HIWORD(u32Tmp);
3227 return VINF_SUCCESS;
3228 }
3229 }
3230 else
3231 {
3232 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3233 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3234 }
3235 break;
3236 }
3237
3238 /*
3239 * 32-bit TSS (X86TSS32).
3240 */
3241 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3242 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3243 {
3244 uint32_t off = uCpl * 8 + 4;
3245 if (off + 7 <= pCtx->tr.u32Limit)
3246 {
3247 /** @todo check actual access pattern here. */
3248 uint64_t u64Tmp;
3249 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3250 if (rcStrict == VINF_SUCCESS)
3251 {
3252 *puEsp = u64Tmp & UINT32_MAX;
3253 *pSelSS = (RTSEL)(u64Tmp >> 32);
3254 return VINF_SUCCESS;
3255 }
3256 }
3257 else
3258 {
3259 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3260 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3261 }
3262 break;
3263 }
3264
3265 default:
3266 AssertFailed();
3267 rcStrict = VERR_IEM_IPE_4;
3268 break;
3269 }
3270
3271 *puEsp = 0; /* make gcc happy */
3272 *pSelSS = 0; /* make gcc happy */
3273 return rcStrict;
3274}
3275
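/*
 * Worked example for the offsets used above, assuming the standard X86TSS16
 * and X86TSS32 layouts: for uCpl=0 the 16-bit path reads the dword at offset
 * 0*4 + 2 = 2, i.e. SP0 (offset 2) and SS0 (offset 4); the 32-bit path reads
 * the qword at 0*8 + 4 = 4, i.e. ESP0 (offset 4) and SS0 (offset 8). For
 * uCpl=2 the offsets become 10 and 20 respectively.
 */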
3276
3277/**
3278 * Loads the specified stack pointer from the 64-bit TSS.
3279 *
3280 * @returns VBox strict status code.
3281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3282 * @param pCtx The CPU context.
3283 * @param uCpl The CPL to load the stack for.
3284 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3285 * @param puRsp Where to return the new stack pointer.
3286 */
3287IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3288{
3289 Assert(uCpl < 4);
3290 Assert(uIst < 8);
3291 *puRsp = 0; /* make gcc happy */
3292
3293 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3294
3295 uint32_t off;
3296 if (uIst)
3297 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3298 else
3299 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3300 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3301 {
3302 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3303 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3304 }
3305
3306 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3307}
3308
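/*
 * Worked example, assuming the standard X86TSS64 layout (rsp0 at offset 4,
 * ist1 at offset 36): uIst=0 with uCpl=1 reads the qword at offset
 * 4 + 1*8 = 12 (RSP1); uIst=3 reads offset 36 + 2*8 = 52 (IST3) regardless
 * of uCpl.
 */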
3309
3310/**
3311 * Adjust the CPU state according to the exception being raised.
3312 *
3313 * @param pCtx The CPU context.
3314 * @param u8Vector The exception that has been raised.
3315 */
3316DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3317{
3318 switch (u8Vector)
3319 {
3320 case X86_XCPT_DB:
3321 pCtx->dr[7] &= ~X86_DR7_GD;
3322 break;
3323 /** @todo Read the AMD and Intel exception reference... */
3324 }
3325}
3326
3327
3328/**
3329 * Implements exceptions and interrupts for real mode.
3330 *
3331 * @returns VBox strict status code.
3332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3333 * @param pCtx The CPU context.
3334 * @param cbInstr The number of bytes to offset rIP by in the return
3335 * address.
3336 * @param u8Vector The interrupt / exception vector number.
3337 * @param fFlags The flags.
3338 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3339 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3340 */
3341IEM_STATIC VBOXSTRICTRC
3342iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3343 PCPUMCTX pCtx,
3344 uint8_t cbInstr,
3345 uint8_t u8Vector,
3346 uint32_t fFlags,
3347 uint16_t uErr,
3348 uint64_t uCr2)
3349{
3350 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3351 NOREF(uErr); NOREF(uCr2);
3352
3353 /*
3354 * Read the IDT entry.
3355 */
3356 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3357 {
3358 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3359 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3360 }
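 /*
  * Note on the error code format used above (standard x86 convention): bit 1
  * (X86_TRAP_ERR_IDT) marks an IDT-relative reference and the bits from
  * X86_TRAP_ERR_SEL_SHIFT upwards hold the vector, so vector 0x0e yields
  * (0x0e << X86_TRAP_ERR_SEL_SHIFT) | X86_TRAP_ERR_IDT = 0x72. Bit 0 (EXT)
  * is set for externally generated events.
  */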
3361 RTFAR16 Idte;
3362 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3363 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3364 return rcStrict;
3365
3366 /*
3367 * Push the stack frame.
3368 */
3369 uint16_t *pu16Frame;
3370 uint64_t uNewRsp;
3371 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3372 if (rcStrict != VINF_SUCCESS)
3373 return rcStrict;
3374
3375 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3376#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3377 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3378 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3379 fEfl |= UINT16_C(0xf000);
3380#endif
3381 pu16Frame[2] = (uint16_t)fEfl;
3382 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3383 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3384 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3385 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3386 return rcStrict;
3387
3388 /*
3389 * Load the vector address into cs:ip and make exception specific state
3390 * adjustments.
3391 */
3392 pCtx->cs.Sel = Idte.sel;
3393 pCtx->cs.ValidSel = Idte.sel;
3394 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3395 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3396 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3397 pCtx->rip = Idte.off;
3398 fEfl &= ~X86_EFL_IF;
3399 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3400
3401 /** @todo do we actually do this in real mode? */
3402 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3403 iemRaiseXcptAdjustState(pCtx, u8Vector);
3404
3405 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3406}
3407
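/*
 * Worked example for the real-mode path above: IVT entries are 4 bytes
 * (16-bit offset followed by 16-bit segment), so vector 0x08 is fetched from
 * IDTR.base + 0x20. The frame pushed is FLAGS, CS and the return IP, i.e. the
 * 6 bytes reserved by iemMemStackPushBeginSpecial, with FLAGS at the highest
 * address and IP at the new SP. Control then transfers to Idte.sel:Idte.off
 * with IF cleared.
 */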
3408
3409/**
3410 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3411 *
3412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3413 * @param pSReg Pointer to the segment register.
3414 */
3415IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3416{
3417 pSReg->Sel = 0;
3418 pSReg->ValidSel = 0;
3419 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3420 {
3421 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3422 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3423 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3424 }
3425 else
3426 {
3427 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3428 /** @todo check this on AMD-V */
3429 pSReg->u64Base = 0;
3430 pSReg->u32Limit = 0;
3431 }
3432}
3433
3434
3435/**
3436 * Loads a segment selector during a task switch in V8086 mode.
3437 *
3438 * @param pSReg Pointer to the segment register.
3439 * @param uSel The selector value to load.
3440 */
3441IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3442{
3443 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3444 pSReg->Sel = uSel;
3445 pSReg->ValidSel = uSel;
3446 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3447 pSReg->u64Base = uSel << 4;
3448 pSReg->u32Limit = 0xffff;
3449 pSReg->Attr.u = 0xf3;
3450}
3451
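/*
 * Worked example: loading selector 0x1234 in V8086 mode yields base 0x12340
 * (selector << 4), limit 0xffff and attributes 0xf3 (present, DPL=3,
 * accessed read/write data), i.e. real-address-mode style segmentation as
 * described in the Intel spec section referenced above.
 */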
3452
3453/**
3454 * Loads a NULL data selector into a selector register, both the hidden and
3455 * visible parts, in protected mode.
3456 *
3457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param pSReg Pointer to the segment register.
3459 * @param uRpl The RPL.
3460 */
3461IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3462{
3463 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3464 * data selector in protected mode. */
3465 pSReg->Sel = uRpl;
3466 pSReg->ValidSel = uRpl;
3467 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3468 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3469 {
3470 /* VT-x (Intel 3960x) observed doing something like this. */
3471 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3472 pSReg->u32Limit = UINT32_MAX;
3473 pSReg->u64Base = 0;
3474 }
3475 else
3476 {
3477 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3478 pSReg->u32Limit = 0;
3479 pSReg->u64Base = 0;
3480 }
3481}
3482
3483
3484/**
3485 * Loads a segment selector during a task switch in protected mode.
3486 *
3487 * In this task switch scenario, we would throw \#TS exceptions rather than
3488 * \#GPs.
3489 *
3490 * @returns VBox strict status code.
3491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3492 * @param pSReg Pointer to the segment register.
3493 * @param uSel The new selector value.
3494 *
3495 * @remarks This does _not_ handle CS or SS.
3496 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3497 */
3498IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3499{
3500 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3501
3502 /* Null data selector. */
3503 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3504 {
3505 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3506 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3507 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3508 return VINF_SUCCESS;
3509 }
3510
3511 /* Fetch the descriptor. */
3512 IEMSELDESC Desc;
3513 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3514 if (rcStrict != VINF_SUCCESS)
3515 {
3516 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3517 VBOXSTRICTRC_VAL(rcStrict)));
3518 return rcStrict;
3519 }
3520
3521 /* Must be a data segment or readable code segment. */
3522 if ( !Desc.Legacy.Gen.u1DescType
3523 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3524 {
3525 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3526 Desc.Legacy.Gen.u4Type));
3527 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3528 }
3529
3530 /* Check privileges for data segments and non-conforming code segments. */
3531 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3532 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3533 {
3534 /* The RPL and the new CPL must be less than or equal to the DPL. */
3535 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3536 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3537 {
3538 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3539 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3540 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3541 }
3542 }
3543
3544 /* Is it there? */
3545 if (!Desc.Legacy.Gen.u1Present)
3546 {
3547 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3548 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3549 }
3550
3551 /* The base and limit. */
3552 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3553 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3554
3555 /*
3556 * Ok, everything checked out fine. Now set the accessed bit before
3557 * committing the result into the registers.
3558 */
3559 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3560 {
3561 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3562 if (rcStrict != VINF_SUCCESS)
3563 return rcStrict;
3564 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3565 }
3566
3567 /* Commit */
3568 pSReg->Sel = uSel;
3569 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3570 pSReg->u32Limit = cbLimit;
3571 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3572 pSReg->ValidSel = uSel;
3573 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3574 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3575 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3576
3577 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3578 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3579 return VINF_SUCCESS;
3580}
3581
3582
3583/**
3584 * Performs a task switch.
3585 *
3586 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3587 * caller is responsible for performing the necessary checks (like DPL, TSS
3588 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3589 * reference for JMP, CALL, IRET.
3590 *
3591 * If the task switch is due to a software interrupt or hardware exception,
3592 * the caller is responsible for validating the TSS selector and descriptor. See
3593 * Intel Instruction reference for INT n.
3594 *
3595 * @returns VBox strict status code.
3596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3597 * @param pCtx The CPU context.
3598 * @param enmTaskSwitch What caused this task switch.
3599 * @param uNextEip The EIP effective after the task switch.
3600 * @param fFlags The flags.
3601 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3602 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3603 * @param SelTSS The TSS selector of the new task.
3604 * @param pNewDescTSS Pointer to the new TSS descriptor.
3605 */
3606IEM_STATIC VBOXSTRICTRC
3607iemTaskSwitch(PVMCPU pVCpu,
3608 PCPUMCTX pCtx,
3609 IEMTASKSWITCH enmTaskSwitch,
3610 uint32_t uNextEip,
3611 uint32_t fFlags,
3612 uint16_t uErr,
3613 uint64_t uCr2,
3614 RTSEL SelTSS,
3615 PIEMSELDESC pNewDescTSS)
3616{
3617 Assert(!IEM_IS_REAL_MODE(pVCpu));
3618 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3619
3620 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3621 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3622 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3623 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3624 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3625
3626 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3627 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3628
3629 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3630 fIsNewTSS386, pCtx->eip, uNextEip));
3631
3632 /* Update CR2 in case it's a page-fault. */
3633 /** @todo This should probably be done much earlier in IEM/PGM. See
3634 * @bugref{5653#c49}. */
3635 if (fFlags & IEM_XCPT_FLAGS_CR2)
3636 pCtx->cr2 = uCr2;
3637
3638 /*
3639 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3640 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3641 */
3642 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3643 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3644 if (uNewTSSLimit < uNewTSSLimitMin)
3645 {
3646 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3647 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3648 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3649 }
3650
3651 /*
3652 * Check the current TSS limit. The last written byte to the current TSS during the
3653 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3654 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3655 *
3656 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3657 * end up with smaller than "legal" TSS limits.
3658 */
3659 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3660 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3661 if (uCurTSSLimit < uCurTSSLimitMin)
3662 {
3663 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3664 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3665 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3666 }
3667
3668 /*
3669 * Verify that the new TSS can be accessed and map it. Map only the required contents
3670 * and not the entire TSS.
3671 */
3672 void *pvNewTSS;
3673 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3674 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3675 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3676 /** @todo Handle the case where the TSS crosses a page boundary. Intel specifies that it may
3677 * not perform correct translation if this happens. See Intel spec. 7.2.1
3678 * "Task-State Segment" */
3679 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3680 if (rcStrict != VINF_SUCCESS)
3681 {
3682 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3683 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3684 return rcStrict;
3685 }
3686
3687 /*
3688 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3689 */
3690 uint32_t u32EFlags = pCtx->eflags.u32;
3691 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3692 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3693 {
3694 PX86DESC pDescCurTSS;
3695 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3696 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3697 if (rcStrict != VINF_SUCCESS)
3698 {
3699 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3700 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3701 return rcStrict;
3702 }
3703
3704 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3705 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3706 if (rcStrict != VINF_SUCCESS)
3707 {
3708 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3709 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3710 return rcStrict;
3711 }
3712
3713 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3714 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3715 {
3716 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3717 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3718 u32EFlags &= ~X86_EFL_NT;
3719 }
3720 }
3721
3722 /*
3723 * Save the CPU state into the current TSS.
3724 */
3725 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3726 if (GCPtrNewTSS == GCPtrCurTSS)
3727 {
3728 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3729 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3730 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3731 }
3732 if (fIsNewTSS386)
3733 {
3734 /*
3735 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3736 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3737 */
3738 void *pvCurTSS32;
3739 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3740 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3741 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3742 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3743 if (rcStrict != VINF_SUCCESS)
3744 {
3745 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3746 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3747 return rcStrict;
3748 }
3749
3750 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. TSS offsets [offCurTSS, offCurTSS + cbCurTSS). */
3751 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3752 pCurTSS32->eip = uNextEip;
3753 pCurTSS32->eflags = u32EFlags;
3754 pCurTSS32->eax = pCtx->eax;
3755 pCurTSS32->ecx = pCtx->ecx;
3756 pCurTSS32->edx = pCtx->edx;
3757 pCurTSS32->ebx = pCtx->ebx;
3758 pCurTSS32->esp = pCtx->esp;
3759 pCurTSS32->ebp = pCtx->ebp;
3760 pCurTSS32->esi = pCtx->esi;
3761 pCurTSS32->edi = pCtx->edi;
3762 pCurTSS32->es = pCtx->es.Sel;
3763 pCurTSS32->cs = pCtx->cs.Sel;
3764 pCurTSS32->ss = pCtx->ss.Sel;
3765 pCurTSS32->ds = pCtx->ds.Sel;
3766 pCurTSS32->fs = pCtx->fs.Sel;
3767 pCurTSS32->gs = pCtx->gs.Sel;
3768
3769 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3770 if (rcStrict != VINF_SUCCESS)
3771 {
3772 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3773 VBOXSTRICTRC_VAL(rcStrict)));
3774 return rcStrict;
3775 }
3776 }
3777 else
3778 {
3779 /*
3780 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3781 */
3782 void *pvCurTSS16;
3783 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3784 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3785 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3786 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3787 if (rcStrict != VINF_SUCCESS)
3788 {
3789 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3790 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3791 return rcStrict;
3792 }
3793
3794 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. TSS offsets [offCurTSS, offCurTSS + cbCurTSS). */
3795 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3796 pCurTSS16->ip = uNextEip;
3797 pCurTSS16->flags = u32EFlags;
3798 pCurTSS16->ax = pCtx->ax;
3799 pCurTSS16->cx = pCtx->cx;
3800 pCurTSS16->dx = pCtx->dx;
3801 pCurTSS16->bx = pCtx->bx;
3802 pCurTSS16->sp = pCtx->sp;
3803 pCurTSS16->bp = pCtx->bp;
3804 pCurTSS16->si = pCtx->si;
3805 pCurTSS16->di = pCtx->di;
3806 pCurTSS16->es = pCtx->es.Sel;
3807 pCurTSS16->cs = pCtx->cs.Sel;
3808 pCurTSS16->ss = pCtx->ss.Sel;
3809 pCurTSS16->ds = pCtx->ds.Sel;
3810
3811 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3812 if (rcStrict != VINF_SUCCESS)
3813 {
3814 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3815 VBOXSTRICTRC_VAL(rcStrict)));
3816 return rcStrict;
3817 }
3818 }
3819
3820 /*
3821 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3822 */
3823 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3824 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3825 {
3826 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3827 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3828 pNewTSS->selPrev = pCtx->tr.Sel;
3829 }
3830
3831 /*
3832 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3833 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3834 */
3835 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3836 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3837 bool fNewDebugTrap;
3838 if (fIsNewTSS386)
3839 {
3840 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3841 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3842 uNewEip = pNewTSS32->eip;
3843 uNewEflags = pNewTSS32->eflags;
3844 uNewEax = pNewTSS32->eax;
3845 uNewEcx = pNewTSS32->ecx;
3846 uNewEdx = pNewTSS32->edx;
3847 uNewEbx = pNewTSS32->ebx;
3848 uNewEsp = pNewTSS32->esp;
3849 uNewEbp = pNewTSS32->ebp;
3850 uNewEsi = pNewTSS32->esi;
3851 uNewEdi = pNewTSS32->edi;
3852 uNewES = pNewTSS32->es;
3853 uNewCS = pNewTSS32->cs;
3854 uNewSS = pNewTSS32->ss;
3855 uNewDS = pNewTSS32->ds;
3856 uNewFS = pNewTSS32->fs;
3857 uNewGS = pNewTSS32->gs;
3858 uNewLdt = pNewTSS32->selLdt;
3859 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3860 }
3861 else
3862 {
3863 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3864 uNewCr3 = 0;
3865 uNewEip = pNewTSS16->ip;
3866 uNewEflags = pNewTSS16->flags;
3867 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3868 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3869 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3870 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3871 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3872 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3873 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3874 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3875 uNewES = pNewTSS16->es;
3876 uNewCS = pNewTSS16->cs;
3877 uNewSS = pNewTSS16->ss;
3878 uNewDS = pNewTSS16->ds;
3879 uNewFS = 0;
3880 uNewGS = 0;
3881 uNewLdt = pNewTSS16->selLdt;
3882 fNewDebugTrap = false;
3883 }
3884
3885 if (GCPtrNewTSS == GCPtrCurTSS)
3886 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3887 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3888
3889 /*
3890 * We're done accessing the new TSS.
3891 */
3892 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3893 if (rcStrict != VINF_SUCCESS)
3894 {
3895 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3896 return rcStrict;
3897 }
3898
3899 /*
3900 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3901 */
3902 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3903 {
3904 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3905 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3906 if (rcStrict != VINF_SUCCESS)
3907 {
3908 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3909 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3910 return rcStrict;
3911 }
3912
3913 /* Check that the descriptor indicates the new TSS is available (not busy). */
3914 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3915 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3916 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3917
3918 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3919 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3920 if (rcStrict != VINF_SUCCESS)
3921 {
3922 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3923 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3924 return rcStrict;
3925 }
3926 }
3927
3928 /*
3929 * From this point on, we're technically in the new task. We will defer exceptions
3930 * until the completion of the task switch but before executing any instructions in the new task.
3931 */
3932 pCtx->tr.Sel = SelTSS;
3933 pCtx->tr.ValidSel = SelTSS;
3934 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3935 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3936 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3937 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3938 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3939
3940 /* Set the busy bit in TR. */
3941 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3942 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3943 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3944 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3945 {
3946 uNewEflags |= X86_EFL_NT;
3947 }
3948
3949 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3950 pCtx->cr0 |= X86_CR0_TS;
3951 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3952
3953 pCtx->eip = uNewEip;
3954 pCtx->eax = uNewEax;
3955 pCtx->ecx = uNewEcx;
3956 pCtx->edx = uNewEdx;
3957 pCtx->ebx = uNewEbx;
3958 pCtx->esp = uNewEsp;
3959 pCtx->ebp = uNewEbp;
3960 pCtx->esi = uNewEsi;
3961 pCtx->edi = uNewEdi;
3962
3963 uNewEflags &= X86_EFL_LIVE_MASK;
3964 uNewEflags |= X86_EFL_RA1_MASK;
3965 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3966
3967 /*
3968 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3969 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3970 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3971 */
3972 pCtx->es.Sel = uNewES;
3973 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3974
3975 pCtx->cs.Sel = uNewCS;
3976 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3977
3978 pCtx->ss.Sel = uNewSS;
3979 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3980
3981 pCtx->ds.Sel = uNewDS;
3982 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3983
3984 pCtx->fs.Sel = uNewFS;
3985 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3986
3987 pCtx->gs.Sel = uNewGS;
3988 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3989 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3990
3991 pCtx->ldtr.Sel = uNewLdt;
3992 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3993 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3994 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3995
3996 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3997 {
3998 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3999 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4000 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4001 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4002 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4003 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4004 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4005 }
4006
4007 /*
4008 * Switch CR3 for the new task.
4009 */
4010 if ( fIsNewTSS386
4011 && (pCtx->cr0 & X86_CR0_PG))
4012 {
4013 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4014 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4015 {
4016 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4017 AssertRCSuccessReturn(rc, rc);
4018 }
4019 else
4020 pCtx->cr3 = uNewCr3;
4021
4022 /* Inform PGM. */
4023 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4024 {
4025 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4026 AssertRCReturn(rc, rc);
4027 /* ignore informational status codes */
4028 }
4029 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4030 }
4031
4032 /*
4033 * Switch LDTR for the new task.
4034 */
4035 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4036 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4037 else
4038 {
4039 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4040
4041 IEMSELDESC DescNewLdt;
4042 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4043 if (rcStrict != VINF_SUCCESS)
4044 {
4045 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4046 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4047 return rcStrict;
4048 }
4049 if ( !DescNewLdt.Legacy.Gen.u1Present
4050 || DescNewLdt.Legacy.Gen.u1DescType
4051 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4052 {
4053 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4054 uNewLdt, DescNewLdt.Legacy.u));
4055 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4056 }
4057
4058 pCtx->ldtr.ValidSel = uNewLdt;
4059 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4060 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4061 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4062 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4063 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4064 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4066 }
4067
4068 IEMSELDESC DescSS;
4069 if (IEM_IS_V86_MODE(pVCpu))
4070 {
4071 pVCpu->iem.s.uCpl = 3;
4072 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4073 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4074 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4075 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4076 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4077 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4078
4079 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4080 DescSS.Legacy.u = 0;
4081 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4082 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4083 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4084 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4085 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4086 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4087 DescSS.Legacy.Gen.u2Dpl = 3;
4088 }
4089 else
4090 {
4091 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4092
4093 /*
4094 * Load the stack segment for the new task.
4095 */
4096 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4097 {
4098 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4099 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4100 }
4101
4102 /* Fetch the descriptor. */
4103 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4104 if (rcStrict != VINF_SUCCESS)
4105 {
4106 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4107 VBOXSTRICTRC_VAL(rcStrict)));
4108 return rcStrict;
4109 }
4110
4111 /* SS must be a data segment and writable. */
4112 if ( !DescSS.Legacy.Gen.u1DescType
4113 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4114 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4115 {
4116 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4117 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4118 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4119 }
4120
4121 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4122 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4123 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4124 {
4125 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4126 uNewCpl));
4127 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4128 }
4129
4130 /* Is it there? */
4131 if (!DescSS.Legacy.Gen.u1Present)
4132 {
4133 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4134 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4135 }
4136
4137 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4138 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4139
4140 /* Set the accessed bit before committing the result into SS. */
4141 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4142 {
4143 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4144 if (rcStrict != VINF_SUCCESS)
4145 return rcStrict;
4146 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4147 }
4148
4149 /* Commit SS. */
4150 pCtx->ss.Sel = uNewSS;
4151 pCtx->ss.ValidSel = uNewSS;
4152 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4153 pCtx->ss.u32Limit = cbLimit;
4154 pCtx->ss.u64Base = u64Base;
4155 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4156 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4157
4158 /* CPL has changed, update IEM before loading rest of segments. */
4159 pVCpu->iem.s.uCpl = uNewCpl;
4160
4161 /*
4162 * Load the data segments for the new task.
4163 */
4164 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4165 if (rcStrict != VINF_SUCCESS)
4166 return rcStrict;
4167 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4168 if (rcStrict != VINF_SUCCESS)
4169 return rcStrict;
4170 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4171 if (rcStrict != VINF_SUCCESS)
4172 return rcStrict;
4173 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4174 if (rcStrict != VINF_SUCCESS)
4175 return rcStrict;
4176
4177 /*
4178 * Load the code segment for the new task.
4179 */
4180 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4181 {
4182 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4183 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4184 }
4185
4186 /* Fetch the descriptor. */
4187 IEMSELDESC DescCS;
4188 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4189 if (rcStrict != VINF_SUCCESS)
4190 {
4191 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4192 return rcStrict;
4193 }
4194
4195 /* CS must be a code segment. */
4196 if ( !DescCS.Legacy.Gen.u1DescType
4197 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4198 {
4199 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4200 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4201 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4202 }
4203
4204 /* For conforming CS, DPL must be less than or equal to the RPL. */
4205 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4206 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4207 {
4208 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4209 DescCS.Legacy.Gen.u2Dpl));
4210 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4211 }
4212
4213 /* For non-conforming CS, DPL must match RPL. */
4214 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4215 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4216 {
4217 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4218 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4219 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4220 }
4221
4222 /* Is it there? */
4223 if (!DescCS.Legacy.Gen.u1Present)
4224 {
4225 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4226 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4227 }
4228
4229 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4230 u64Base = X86DESC_BASE(&DescCS.Legacy);
4231
4232 /* Set the accessed bit before committing the result into CS. */
4233 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4234 {
4235 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4236 if (rcStrict != VINF_SUCCESS)
4237 return rcStrict;
4238 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4239 }
4240
4241 /* Commit CS. */
4242 pCtx->cs.Sel = uNewCS;
4243 pCtx->cs.ValidSel = uNewCS;
4244 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4245 pCtx->cs.u32Limit = cbLimit;
4246 pCtx->cs.u64Base = u64Base;
4247 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4249 }
4250
4251 /** @todo Debug trap. */
4252 if (fIsNewTSS386 && fNewDebugTrap)
4253 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4254
4255 /*
4256 * Construct the error code masks based on what caused this task switch.
4257 * See Intel Instruction reference for INT.
4258 */
4259 uint16_t uExt;
4260 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4261 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4262 {
4263 uExt = 1;
4264 }
4265 else
4266 uExt = 0;
4267
4268 /*
4269 * Push any error code on to the new stack.
4270 */
4271 if (fFlags & IEM_XCPT_FLAGS_ERR)
4272 {
4273 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4274 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4275 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4276
4277 /* Check that there is sufficient space on the stack. */
4278 /** @todo Factor out segment limit checking for normal/expand down segments
4279 * into a separate function. */
4280 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4281 {
4282 if ( pCtx->esp - 1 > cbLimitSS
4283 || pCtx->esp < cbStackFrame)
4284 {
4285 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4286 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4287 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4288 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4289 }
4290 }
4291 else
4292 {
4293 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4294 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4295 {
4296 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4297 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4298 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4299 }
4300 }
4301
4302
4303 if (fIsNewTSS386)
4304 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4305 else
4306 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4307 if (rcStrict != VINF_SUCCESS)
4308 {
4309 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4310 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4311 return rcStrict;
4312 }
4313 }
4314
4315 /* Check the new EIP against the new CS limit. */
4316 if (pCtx->eip > pCtx->cs.u32Limit)
4317 {
4318 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4319 pCtx->eip, pCtx->cs.u32Limit));
4320 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4321 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4322 }
4323
4324 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4325 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4326}
4327
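/*
 * Quick reference for the busy bit / EFLAGS.NT handling implemented above
 * (derived from the code; see the Intel task management chapter for the
 * authoritative rules):
 *
 *      Cause      Old TSS busy   New TSS busy   EFLAGS.NT after   Back-link written
 *      JMP        cleared        set            from new TSS      no
 *      CALL/INT   left set       set            forced to 1       yes
 *      IRET       cleared        left set       from new TSS      no
 *
 * For IRET the NT bit is additionally cleared in the EFLAGS image saved back
 * into the outgoing task's TSS.
 */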
4328
4329/**
4330 * Implements exceptions and interrupts for protected mode.
4331 *
4332 * @returns VBox strict status code.
4333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4334 * @param pCtx The CPU context.
4335 * @param cbInstr The number of bytes to offset rIP by in the return
4336 * address.
4337 * @param u8Vector The interrupt / exception vector number.
4338 * @param fFlags The flags.
4339 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4340 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4341 */
4342IEM_STATIC VBOXSTRICTRC
4343iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4344 PCPUMCTX pCtx,
4345 uint8_t cbInstr,
4346 uint8_t u8Vector,
4347 uint32_t fFlags,
4348 uint16_t uErr,
4349 uint64_t uCr2)
4350{
4351 /*
4352 * Read the IDT entry.
4353 */
4354 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4355 {
4356 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4357 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4358 }
4359 X86DESC Idte;
4360 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4361 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4362 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4363 return rcStrict;
4364 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4365 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4366 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4367
4368 /*
4369 * Check the descriptor type, DPL and such.
4370 * ASSUMES this is done in the same order as described for call-gate calls.
4371 */
4372 if (Idte.Gate.u1DescType)
4373 {
4374 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4375 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4376 }
4377 bool fTaskGate = false;
4378 uint8_t f32BitGate = true;
4379 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4380 switch (Idte.Gate.u4Type)
4381 {
4382 case X86_SEL_TYPE_SYS_UNDEFINED:
4383 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4384 case X86_SEL_TYPE_SYS_LDT:
4385 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4386 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4387 case X86_SEL_TYPE_SYS_UNDEFINED2:
4388 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4389 case X86_SEL_TYPE_SYS_UNDEFINED3:
4390 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4391 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4392 case X86_SEL_TYPE_SYS_UNDEFINED4:
4393 {
4394 /** @todo check what actually happens when the type is wrong...
4395 * esp. call gates. */
4396 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4397 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4398 }
4399
4400 case X86_SEL_TYPE_SYS_286_INT_GATE:
4401 f32BitGate = false; /* fall thru */
4402 case X86_SEL_TYPE_SYS_386_INT_GATE:
4403 fEflToClear |= X86_EFL_IF;
4404 break;
4405
4406 case X86_SEL_TYPE_SYS_TASK_GATE:
4407 fTaskGate = true;
4408#ifndef IEM_IMPLEMENTS_TASKSWITCH
4409 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4410#endif
4411 break;
4412
4413 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4414 f32BitGate = false; /* fall thru */
4415 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4416 break;
4417
4418 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4419 }
4420
4421 /* Check DPL against CPL if applicable. */
4422 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4423 {
4424 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4425 {
4426 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4427 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4428 }
4429 }
4430
4431 /* Is it there? */
4432 if (!Idte.Gate.u1Present)
4433 {
4434 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4435 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4436 }
4437
4438 /* Is it a task-gate? */
4439 if (fTaskGate)
4440 {
4441 /*
4442 * Construct the error code masks based on what caused this task switch.
4443 * See Intel Instruction reference for INT.
4444 */
4445 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4446 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4447 RTSEL SelTSS = Idte.Gate.u16Sel;
4448
4449 /*
4450 * Fetch the TSS descriptor in the GDT.
4451 */
4452 IEMSELDESC DescTSS;
4453 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4454 if (rcStrict != VINF_SUCCESS)
4455 {
4456 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4457 VBOXSTRICTRC_VAL(rcStrict)));
4458 return rcStrict;
4459 }
4460
4461 /* The TSS descriptor must be a system segment and be available (not busy). */
4462 if ( DescTSS.Legacy.Gen.u1DescType
4463 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4464 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4465 {
4466 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4467 u8Vector, SelTSS, DescTSS.Legacy.au64));
4468 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4469 }
4470
4471 /* The TSS must be present. */
4472 if (!DescTSS.Legacy.Gen.u1Present)
4473 {
4474 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4475 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4476 }
4477
4478 /* Do the actual task switch. */
4479 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4480 }
4481
4482 /* A null CS is bad. */
4483 RTSEL NewCS = Idte.Gate.u16Sel;
4484 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4485 {
4486 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4487 return iemRaiseGeneralProtectionFault0(pVCpu);
4488 }
4489
4490 /* Fetch the descriptor for the new CS. */
4491 IEMSELDESC DescCS;
4492 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4493 if (rcStrict != VINF_SUCCESS)
4494 {
4495 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4496 return rcStrict;
4497 }
4498
4499 /* Must be a code segment. */
4500 if (!DescCS.Legacy.Gen.u1DescType)
4501 {
4502 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4503 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4504 }
4505 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4506 {
4507 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4508 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4509 }
4510
4511 /* Don't allow lowering the privilege level. */
4512 /** @todo Does the lowering of privileges apply to software interrupts
4513 * only? This has bearings on the more-privileged or
4514 * same-privilege stack behavior further down. A testcase would
4515 * be nice. */
4516 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4517 {
4518 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4519 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4520 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4521 }
4522
4523 /* Make sure the selector is present. */
4524 if (!DescCS.Legacy.Gen.u1Present)
4525 {
4526 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4527 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4528 }
4529
4530 /* Check the new EIP against the new CS limit. */
4531 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4532 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4533 ? Idte.Gate.u16OffsetLow
4534 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4535 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4536 if (uNewEip > cbLimitCS)
4537 {
4538 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4539 u8Vector, uNewEip, cbLimitCS, NewCS));
4540 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4541 }
4542 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4543
4544 /* Calc the flag image to push. */
4545 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4546 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4547 fEfl &= ~X86_EFL_RF;
4548 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4549 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4550
4551 /* From V8086 mode only go to CPL 0. */
4552 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4553 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4554 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4555 {
4556 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4557 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4558 }
4559
4560 /*
4561 * If the privilege level changes, we need to get a new stack from the TSS.
4562 * This in turns means validating the new SS and ESP...
4563 */
4564 if (uNewCpl != pVCpu->iem.s.uCpl)
4565 {
4566 RTSEL NewSS;
4567 uint32_t uNewEsp;
4568 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4569 if (rcStrict != VINF_SUCCESS)
4570 return rcStrict;
4571
4572 IEMSELDESC DescSS;
4573 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4574 if (rcStrict != VINF_SUCCESS)
4575 return rcStrict;
4576 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4577
4578 /* Check that there is sufficient space for the stack frame. */
4579 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4580 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4581 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4582 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4583
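 /*
  * Worked example for the frame sizes above: a 32-bit gate (f32BitGate=1)
  * without an error code and not interrupting V8086 code gives
  * 10 << 1 = 20 bytes (EIP, CS, EFLAGS, ESP, SS); with an error code it is
  * 12 << 1 = 24 bytes. The V8086 case adds ES, DS, FS and GS, giving
  * 18 << 1 = 36 bytes (or 20 << 1 = 40 with an error code). A 16-bit gate
  * halves all of these.
  */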
4584 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4585 {
4586 if ( uNewEsp - 1 > cbLimitSS
4587 || uNewEsp < cbStackFrame)
4588 {
4589 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4590 u8Vector, NewSS, uNewEsp, cbStackFrame));
4591 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4592 }
4593 }
4594 else
4595 {
4596 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4597 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4598 {
4599 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4600 u8Vector, NewSS, uNewEsp, cbStackFrame));
4601 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4602 }
4603 }
4604
4605 /*
4606 * Start making changes.
4607 */
4608
4609 /* Set the new CPL so that stack accesses use it. */
4610 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4611 pVCpu->iem.s.uCpl = uNewCpl;
4612
4613 /* Create the stack frame. */
4614 RTPTRUNION uStackFrame;
4615 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4616 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4617 if (rcStrict != VINF_SUCCESS)
4618 return rcStrict;
4619 void * const pvStackFrame = uStackFrame.pv;
4620 if (f32BitGate)
4621 {
4622 if (fFlags & IEM_XCPT_FLAGS_ERR)
4623 *uStackFrame.pu32++ = uErr;
4624 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4625 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4626 uStackFrame.pu32[2] = fEfl;
4627 uStackFrame.pu32[3] = pCtx->esp;
4628 uStackFrame.pu32[4] = pCtx->ss.Sel;
4629 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4630 if (fEfl & X86_EFL_VM)
4631 {
4632 uStackFrame.pu32[1] = pCtx->cs.Sel;
4633 uStackFrame.pu32[5] = pCtx->es.Sel;
4634 uStackFrame.pu32[6] = pCtx->ds.Sel;
4635 uStackFrame.pu32[7] = pCtx->fs.Sel;
4636 uStackFrame.pu32[8] = pCtx->gs.Sel;
4637 }
4638 }
4639 else
4640 {
4641 if (fFlags & IEM_XCPT_FLAGS_ERR)
4642 *uStackFrame.pu16++ = uErr;
4643 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4644 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4645 uStackFrame.pu16[2] = fEfl;
4646 uStackFrame.pu16[3] = pCtx->sp;
4647 uStackFrame.pu16[4] = pCtx->ss.Sel;
4648 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4649 if (fEfl & X86_EFL_VM)
4650 {
4651 uStackFrame.pu16[1] = pCtx->cs.Sel;
4652 uStackFrame.pu16[5] = pCtx->es.Sel;
4653 uStackFrame.pu16[6] = pCtx->ds.Sel;
4654 uStackFrame.pu16[7] = pCtx->fs.Sel;
4655 uStackFrame.pu16[8] = pCtx->gs.Sel;
4656 }
4657 }
4658 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4659 if (rcStrict != VINF_SUCCESS)
4660 return rcStrict;
4661
4662 /* Mark the selectors 'accessed' (hope this is the correct time). */
4663 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4664 * after pushing the stack frame? (Write protect the gdt + stack to
4665 * find out.) */
4666 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4667 {
4668 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4669 if (rcStrict != VINF_SUCCESS)
4670 return rcStrict;
4671 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4672 }
4673
4674 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4675 {
4676 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4677 if (rcStrict != VINF_SUCCESS)
4678 return rcStrict;
4679 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4680 }
4681
4682 /*
4683 * Start committing the register changes (joins with the DPL=CPL branch).
4684 */
4685 pCtx->ss.Sel = NewSS;
4686 pCtx->ss.ValidSel = NewSS;
4687 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4688 pCtx->ss.u32Limit = cbLimitSS;
4689 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4690 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4691 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4692 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4693 * SP is loaded).
4694 * Need to check the other combinations too:
4695 * - 16-bit TSS, 32-bit handler
4696 * - 32-bit TSS, 16-bit handler */
4697 if (!pCtx->ss.Attr.n.u1DefBig)
4698 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4699 else
4700 pCtx->rsp = uNewEsp - cbStackFrame;
4701
4702 if (fEfl & X86_EFL_VM)
4703 {
4704 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4705 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4706 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4707 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4708 }
4709 }
4710 /*
4711 * Same privilege, no stack change and smaller stack frame.
4712 */
4713 else
4714 {
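/* Without a stack switch only (E)IP, CS and (E)FLAGS are pushed, plus the
 error code when applicable. */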
4715 uint64_t uNewRsp;
4716 RTPTRUNION uStackFrame;
4717 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4718 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4719 if (rcStrict != VINF_SUCCESS)
4720 return rcStrict;
4721 void * const pvStackFrame = uStackFrame.pv;
4722
4723 if (f32BitGate)
4724 {
4725 if (fFlags & IEM_XCPT_FLAGS_ERR)
4726 *uStackFrame.pu32++ = uErr;
4727 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4728 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4729 uStackFrame.pu32[2] = fEfl;
4730 }
4731 else
4732 {
4733 if (fFlags & IEM_XCPT_FLAGS_ERR)
4734 *uStackFrame.pu16++ = uErr;
4735 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4736 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4737 uStackFrame.pu16[2] = fEfl;
4738 }
4739 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4740 if (rcStrict != VINF_SUCCESS)
4741 return rcStrict;
4742
4743 /* Mark the CS selector as 'accessed'. */
4744 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4745 {
4746 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4747 if (rcStrict != VINF_SUCCESS)
4748 return rcStrict;
4749 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4750 }
4751
4752 /*
4753 * Start committing the register changes (joins with the other branch).
4754 */
4755 pCtx->rsp = uNewRsp;
4756 }
4757
4758 /* ... register committing continues. */
4759 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4760 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4761 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4762 pCtx->cs.u32Limit = cbLimitCS;
4763 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4764 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4765
4766 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4767 fEfl &= ~fEflToClear;
4768 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4769
4770 if (fFlags & IEM_XCPT_FLAGS_CR2)
4771 pCtx->cr2 = uCr2;
4772
4773 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4774 iemRaiseXcptAdjustState(pCtx, u8Vector);
4775
4776 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4777}
4778
4779
4780/**
4781 * Implements exceptions and interrupts for long mode.
4782 *
4783 * @returns VBox strict status code.
4784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4785 * @param pCtx The CPU context.
4786 * @param cbInstr The number of bytes to offset rIP by in the return
4787 * address.
4788 * @param u8Vector The interrupt / exception vector number.
4789 * @param fFlags The flags.
4790 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4791 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4792 */
4793IEM_STATIC VBOXSTRICTRC
4794iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4795 PCPUMCTX pCtx,
4796 uint8_t cbInstr,
4797 uint8_t u8Vector,
4798 uint32_t fFlags,
4799 uint16_t uErr,
4800 uint64_t uCr2)
4801{
4802 /*
4803 * Read the IDT entry.
4804 */
4805 uint16_t offIdt = (uint16_t)u8Vector << 4;
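/* Long mode IDT entries are 16 bytes, hence the scaling by 16 and the two
 8-byte fetches below. Faults referencing the IDT use an error code with
 the IDT bit set and the vector number in the selector index field. */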
4806 if (pCtx->idtr.cbIdt < offIdt + 7)
4807 {
4808 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4809 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4810 }
4811 X86DESC64 Idte;
4812 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4813 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4814 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4815 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4816 return rcStrict;
4817 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4818 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4819 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4820
4821 /*
4822 * Check the descriptor type, DPL and such.
4823 * ASSUMES this is done in the same order as described for call-gate calls.
4824 */
4825 if (Idte.Gate.u1DescType)
4826 {
4827 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4828 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4829 }
4830 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
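/* Interrupt gates additionally clear IF so the handler runs with interrupts
 disabled; trap gates leave IF untouched. Both clear TF, NT, RF and VM. */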
4831 switch (Idte.Gate.u4Type)
4832 {
4833 case AMD64_SEL_TYPE_SYS_INT_GATE:
4834 fEflToClear |= X86_EFL_IF;
4835 break;
4836 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4837 break;
4838
4839 default:
4840 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4841 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4842 }
4843
4844 /* Check DPL against CPL if applicable. */
4845 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4846 {
4847 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4848 {
4849 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4850 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4851 }
4852 }
4853
4854 /* Is it there? */
4855 if (!Idte.Gate.u1Present)
4856 {
4857 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4858 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4859 }
4860
4861 /* A null CS is bad. */
4862 RTSEL NewCS = Idte.Gate.u16Sel;
4863 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4864 {
4865 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4866 return iemRaiseGeneralProtectionFault0(pVCpu);
4867 }
4868
4869 /* Fetch the descriptor for the new CS. */
4870 IEMSELDESC DescCS;
4871 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4872 if (rcStrict != VINF_SUCCESS)
4873 {
4874 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4875 return rcStrict;
4876 }
4877
4878 /* Must be a 64-bit code segment. */
4879 if (!DescCS.Long.Gen.u1DescType)
4880 {
4881 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4882 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4883 }
4884 if ( !DescCS.Long.Gen.u1Long
4885 || DescCS.Long.Gen.u1DefBig
4886 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4887 {
4888 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4889 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4890 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4891 }
4892
4893 /* Don't allow lowering the privilege level. For non-conforming CS
4894 selectors, the CS.DPL sets the privilege level the trap/interrupt
4895 handler runs at. For conforming CS selectors, the CPL remains
4896 unchanged, but the CS.DPL must be <= CPL. */
4897 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4898 * when CPU in Ring-0. Result \#GP? */
4899 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4900 {
4901 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4902 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4903 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4904 }
4905
4906
4907 /* Make sure the selector is present. */
4908 if (!DescCS.Legacy.Gen.u1Present)
4909 {
4910 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4911 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4912 }
4913
4914 /* Check that the new RIP is canonical. */
4915 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4916 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4917 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4918 if (!IEM_IS_CANONICAL(uNewRip))
4919 {
4920 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4921 return iemRaiseGeneralProtectionFault0(pVCpu);
4922 }
4923
4924 /*
4925 * If the privilege level changes or if the IST isn't zero, we need to get
4926 * a new stack from the TSS.
4927 */
4928 uint64_t uNewRsp;
4929 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4930 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4931 if ( uNewCpl != pVCpu->iem.s.uCpl
4932 || Idte.Gate.u3IST != 0)
4933 {
4934 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4935 if (rcStrict != VINF_SUCCESS)
4936 return rcStrict;
4937 }
4938 else
4939 uNewRsp = pCtx->rsp;
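/* A non-zero IST (1-7) always loads RSP from the corresponding slot in the
 64-bit TSS, even without a CPL change; the stack pointer is then aligned
 down to a 16 byte boundary before the frame is pushed. */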
4940 uNewRsp &= ~(uint64_t)0xf;
4941
4942 /*
4943 * Calc the flag image to push.
4944 */
4945 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4946 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4947 fEfl &= ~X86_EFL_RF;
4948 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4949 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4950
4951 /*
4952 * Start making changes.
4953 */
4954 /* Set the new CPL so that stack accesses use it. */
4955 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4956 pVCpu->iem.s.uCpl = uNewCpl;
4957
4958 /* Create the stack frame. */
4959 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4960 RTPTRUNION uStackFrame;
4961 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4962 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4963 if (rcStrict != VINF_SUCCESS)
4964 return rcStrict;
4965 void * const pvStackFrame = uStackFrame.pv;
4966
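/* Unlike 32-bit protected mode, 64-bit interrupt delivery always pushes
 SS:RSP, even when no stack switch takes place. */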
4967 if (fFlags & IEM_XCPT_FLAGS_ERR)
4968 *uStackFrame.pu64++ = uErr;
4969 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4970 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4971 uStackFrame.pu64[2] = fEfl;
4972 uStackFrame.pu64[3] = pCtx->rsp;
4973 uStackFrame.pu64[4] = pCtx->ss.Sel;
4974 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4975 if (rcStrict != VINF_SUCCESS)
4976 return rcStrict;
4977
4978 /* Mark the CS selector 'accessed' (hope this is the correct time). */
4979 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4980 * after pushing the stack frame? (Write protect the gdt + stack to
4981 * find out.) */
4982 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4983 {
4984 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4985 if (rcStrict != VINF_SUCCESS)
4986 return rcStrict;
4987 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4988 }
4989
4990 /*
4991 * Start committing the register changes.
4992 */
4993 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4994 * hidden registers when interrupting 32-bit or 16-bit code! */
4995 if (uNewCpl != uOldCpl)
4996 {
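/* A privilege change in long mode loads SS with a NULL selector carrying the
 new CPL as RPL and marks the cached descriptor unusable. */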
4997 pCtx->ss.Sel = 0 | uNewCpl;
4998 pCtx->ss.ValidSel = 0 | uNewCpl;
4999 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5000 pCtx->ss.u32Limit = UINT32_MAX;
5001 pCtx->ss.u64Base = 0;
5002 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5003 }
5004 pCtx->rsp = uNewRsp - cbStackFrame;
5005 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5006 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5007 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5008 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5009 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5010 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5011 pCtx->rip = uNewRip;
5012
5013 fEfl &= ~fEflToClear;
5014 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5015
5016 if (fFlags & IEM_XCPT_FLAGS_CR2)
5017 pCtx->cr2 = uCr2;
5018
5019 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5020 iemRaiseXcptAdjustState(pCtx, u8Vector);
5021
5022 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5023}
5024
5025
5026/**
5027 * Implements exceptions and interrupts.
5028 *
5029 * All exceptions and interrupts go through this function!
5030 *
5031 * @returns VBox strict status code.
5032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5033 * @param cbInstr The number of bytes to offset rIP by in the return
5034 * address.
5035 * @param u8Vector The interrupt / exception vector number.
5036 * @param fFlags The flags.
5037 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5038 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5039 */
5040DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5041iemRaiseXcptOrInt(PVMCPU pVCpu,
5042 uint8_t cbInstr,
5043 uint8_t u8Vector,
5044 uint32_t fFlags,
5045 uint16_t uErr,
5046 uint64_t uCr2)
5047{
5048 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5049#ifdef IN_RING0
5050 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5051 AssertRCReturn(rc, rc);
5052#endif
5053
5054#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5055 /*
5056 * Flush prefetch buffer
5057 */
5058 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5059#endif
5060
5061 /*
5062 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5063 */
5064 if ( pCtx->eflags.Bits.u1VM
5065 && pCtx->eflags.Bits.u2IOPL != 3
5066 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5067 && (pCtx->cr0 & X86_CR0_PE) )
5068 {
5069 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5070 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5071 u8Vector = X86_XCPT_GP;
5072 uErr = 0;
5073 }
5074#ifdef DBGFTRACE_ENABLED
5075 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5076 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5077 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5078#endif
5079
5080 /*
5081 * Do recursion accounting.
5082 */
5083 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5084 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5085 if (pVCpu->iem.s.cXcptRecursions == 0)
5086 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5087 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5088 else
5089 {
5090 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5091 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5092
5093 /** @todo double and triple faults. */
5094 if (pVCpu->iem.s.cXcptRecursions >= 3)
5095 {
5096#ifdef DEBUG_bird
5097 AssertFailed();
5098#endif
5099 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5100 }
5101
5102 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5103 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5104 {
5105 ....
5106 } */
5107 }
5108 pVCpu->iem.s.cXcptRecursions++;
5109 pVCpu->iem.s.uCurXcpt = u8Vector;
5110 pVCpu->iem.s.fCurXcpt = fFlags;
5111
5112 /*
5113 * Extensive logging.
5114 */
5115#if defined(LOG_ENABLED) && defined(IN_RING3)
5116 if (LogIs3Enabled())
5117 {
5118 PVM pVM = pVCpu->CTX_SUFF(pVM);
5119 char szRegs[4096];
5120 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5121 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5122 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5123 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5124 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5125 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5126 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5127 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5128 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5129 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5130 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5131 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5132 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5133 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5134 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5135 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5136 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5137 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5138 " efer=%016VR{efer}\n"
5139 " pat=%016VR{pat}\n"
5140 " sf_mask=%016VR{sf_mask}\n"
5141 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5142 " lstar=%016VR{lstar}\n"
5143 " star=%016VR{star} cstar=%016VR{cstar}\n"
5144 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5145 );
5146
5147 char szInstr[256];
5148 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5149 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5150 szInstr, sizeof(szInstr), NULL);
5151 Log3(("%s%s\n", szRegs, szInstr));
5152 }
5153#endif /* LOG_ENABLED */
5154
5155 /*
5156 * Call the mode specific worker function.
5157 */
5158 VBOXSTRICTRC rcStrict;
5159 if (!(pCtx->cr0 & X86_CR0_PE))
5160 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5161 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5162 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5163 else
5164 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5165
5166 /* Flush the prefetch buffer. */
5167#ifdef IEM_WITH_CODE_TLB
5168 pVCpu->iem.s.pbInstrBuf = NULL;
5169#else
5170 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5171#endif
5172
5173 /*
5174 * Unwind.
5175 */
5176 pVCpu->iem.s.cXcptRecursions--;
5177 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5178 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5179 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5180 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5181 return rcStrict;
5182}
5183
5184#ifdef IEM_WITH_SETJMP
5185/**
5186 * See iemRaiseXcptOrInt. Will not return.
5187 */
5188IEM_STATIC DECL_NO_RETURN(void)
5189iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5190 uint8_t cbInstr,
5191 uint8_t u8Vector,
5192 uint32_t fFlags,
5193 uint16_t uErr,
5194 uint64_t uCr2)
5195{
5196 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5197 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5198}
5199#endif
5200
5201
5202/** \#DE - 00. */
5203DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5204{
5205 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5206}
5207
5208
5209/** \#DB - 01.
5210 * @note This automatically clears DR7.GD. */
5211DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5212{
5213 /** @todo set/clear RF. */
5214 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5215 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5216}
5217
5218
5219/** \#UD - 06. */
5220DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5221{
5222 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5223}
5224
5225
5226/** \#NM - 07. */
5227DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5228{
5229 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5230}
5231
5232
5233/** \#TS(err) - 0a. */
5234DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5235{
5236 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5237}
5238
5239
5240/** \#TS(tr) - 0a. */
5241DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5242{
5243 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5244 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5245}
5246
5247
5248/** \#TS(0) - 0a. */
5249DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5250{
5251 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5252 0, 0);
5253}
5254
5255
5256/** \#TS(err) - 0a. */
5257DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5258{
5259 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5260 uSel & X86_SEL_MASK_OFF_RPL, 0);
5261}
5262
5263
5264/** \#NP(err) - 0b. */
5265DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5266{
5267 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5268}
5269
5270
5271/** \#NP(seg) - 0b. */
5272DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5273{
5274 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5275 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5276}
5277
5278
5279/** \#NP(sel) - 0b. */
5280DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5281{
5282 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5283 uSel & ~X86_SEL_RPL, 0);
5284}
5285
5286
5287/** \#SS(seg) - 0c. */
5288DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5289{
5290 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5291 uSel & ~X86_SEL_RPL, 0);
5292}
5293
5294
5295/** \#SS(err) - 0c. */
5296DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5297{
5298 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5299}
5300
5301
5302/** \#GP(n) - 0d. */
5303DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5304{
5305 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5306}
5307
5308
5309/** \#GP(0) - 0d. */
5310DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5311{
5312 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5313}
5314
5315#ifdef IEM_WITH_SETJMP
5316/** \#GP(0) - 0d. */
5317DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5318{
5319 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5320}
5321#endif
5322
5323
5324/** \#GP(sel) - 0d. */
5325DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5326{
5327 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5328 Sel & ~X86_SEL_RPL, 0);
5329}
5330
5331
5332/** \#GP(0) - 0d. */
5333DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5334{
5335 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5336}
5337
5338
5339/** \#GP(sel) - 0d. */
5340DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5341{
5342 NOREF(iSegReg); NOREF(fAccess);
5343 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5344 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5345}
5346
5347#ifdef IEM_WITH_SETJMP
5348/** \#GP(sel) - 0d, longjmp. */
5349DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5350{
5351 NOREF(iSegReg); NOREF(fAccess);
5352 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5353 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5354}
5355#endif
5356
5357/** \#GP(sel) - 0d. */
5358DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5359{
5360 NOREF(Sel);
5361 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5362}
5363
5364#ifdef IEM_WITH_SETJMP
5365/** \#GP(sel) - 0d, longjmp. */
5366DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5367{
5368 NOREF(Sel);
5369 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5370}
5371#endif
5372
5373
5374/** \#GP(sel) - 0d. */
5375DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5376{
5377 NOREF(iSegReg); NOREF(fAccess);
5378 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5379}
5380
5381#ifdef IEM_WITH_SETJMP
5382/** \#GP(sel) - 0d, longjmp. */
5383DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5384 uint32_t fAccess)
5385{
5386 NOREF(iSegReg); NOREF(fAccess);
5387 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5388}
5389#endif
5390
5391
5392/** \#PF(n) - 0e. */
5393DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5394{
5395 uint16_t uErr;
5396 switch (rc)
5397 {
5398 case VERR_PAGE_NOT_PRESENT:
5399 case VERR_PAGE_TABLE_NOT_PRESENT:
5400 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5401 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5402 uErr = 0;
5403 break;
5404
5405 default:
5406 AssertMsgFailed(("%Rrc\n", rc));
5407 case VERR_ACCESS_DENIED:
5408 uErr = X86_TRAP_PF_P;
5409 break;
5410
5411 /** @todo reserved */
5412 }
5413
5414 if (pVCpu->iem.s.uCpl == 3)
5415 uErr |= X86_TRAP_PF_US;
5416
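/* The instruction fetch (I/D) flag is only reported when no-execute is in
 effect, i.e. PAE paging with EFER.NXE set. */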
5417 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5418 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5419 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5420 uErr |= X86_TRAP_PF_ID;
5421
5422#if 0 /* This is so much non-sense, really. Why was it done like that? */
5423 /* Note! RW access callers reporting a WRITE protection fault, will clear
5424 the READ flag before calling. So, read-modify-write accesses (RW)
5425 can safely be reported as READ faults. */
5426 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5427 uErr |= X86_TRAP_PF_RW;
5428#else
5429 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5430 {
5431 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5432 uErr |= X86_TRAP_PF_RW;
5433 }
5434#endif
5435
5436 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5437 uErr, GCPtrWhere);
5438}
5439
5440#ifdef IEM_WITH_SETJMP
5441/** \#PF(n) - 0e, longjmp. */
5442IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5443{
5444 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5445}
5446#endif
5447
5448
5449/** \#MF(0) - 10. */
5450DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5451{
5452 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5453}
5454
5455
5456/** \#AC(0) - 11. */
5457DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5458{
5459 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5460}
5461
5462
5463/**
5464 * Macro for calling iemCImplRaiseDivideError().
5465 *
5466 * This enables us to add/remove arguments and force different levels of
5467 * inlining as we wish.
5468 *
5469 * @return Strict VBox status code.
5470 */
5471#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5472IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5473{
5474 NOREF(cbInstr);
5475 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5476}
5477
5478
5479/**
5480 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5481 *
5482 * This enables us to add/remove arguments and force different levels of
5483 * inlining as we wish.
5484 *
5485 * @return Strict VBox status code.
5486 */
5487#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5488IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5489{
5490 NOREF(cbInstr);
5491 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5492}
5493
5494
5495/**
5496 * Macro for calling iemCImplRaiseInvalidOpcode().
5497 *
5498 * This enables us to add/remove arguments and force different levels of
5499 * inlining as we wish.
5500 *
5501 * @return Strict VBox status code.
5502 */
5503#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5504IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5505{
5506 NOREF(cbInstr);
5507 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5508}
5509
5510
5511/** @} */
5512
5513
5514/*
5515 *
5516 * Helper routines.
5517 * Helper routines.
5518 * Helper routines.
5519 *
5520 */
5521
5522/**
5523 * Recalculates the effective operand size.
5524 *
5525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5526 */
5527IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5528{
5529 switch (pVCpu->iem.s.enmCpuMode)
5530 {
5531 case IEMMODE_16BIT:
5532 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5533 break;
5534 case IEMMODE_32BIT:
5535 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5536 break;
5537 case IEMMODE_64BIT:
5538 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5539 {
5540 case 0:
5541 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5542 break;
5543 case IEM_OP_PRF_SIZE_OP:
5544 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5545 break;
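/* REX.W forces a 64-bit operand size and takes precedence over the 0x66
 operand size prefix. */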
5546 case IEM_OP_PRF_SIZE_REX_W:
5547 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5548 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5549 break;
5550 }
5551 break;
5552 default:
5553 AssertFailed();
5554 }
5555}
5556
5557
5558/**
5559 * Sets the default operand size to 64-bit and recalculates the effective
5560 * operand size.
5561 *
5562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5563 */
5564IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5565{
5566 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5567 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
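/* With a 64-bit default operand size the only way to get 16-bit operands is
 an operand size prefix without REX.W; 32-bit operands cannot be encoded. */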
5568 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5569 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5570 else
5571 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5572}
5573
5574
5575/*
5576 *
5577 * Common opcode decoders.
5578 * Common opcode decoders.
5579 * Common opcode decoders.
5580 *
5581 */
5582//#include <iprt/mem.h>
5583
5584/**
5585 * Used to add extra details about a stub case.
5586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5587 */
5588IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5589{
5590#if defined(LOG_ENABLED) && defined(IN_RING3)
5591 PVM pVM = pVCpu->CTX_SUFF(pVM);
5592 char szRegs[4096];
5593 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5594 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5595 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5596 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5597 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5598 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5599 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5600 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5601 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5602 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5603 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5604 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5605 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5606 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5607 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5608 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5609 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5610 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5611 " efer=%016VR{efer}\n"
5612 " pat=%016VR{pat}\n"
5613 " sf_mask=%016VR{sf_mask}\n"
5614 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5615 " lstar=%016VR{lstar}\n"
5616 " star=%016VR{star} cstar=%016VR{cstar}\n"
5617 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5618 );
5619
5620 char szInstr[256];
5621 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5622 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5623 szInstr, sizeof(szInstr), NULL);
5624
5625 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5626#else
5627 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5628#endif
5629}
5630
5631/**
5632 * Complains about a stub.
5633 *
5634 * Two versions of this macro are provided: one for daily use and one for use
5635 * when working on IEM.
5636 */
5637#if 0
5638# define IEMOP_BITCH_ABOUT_STUB() \
5639 do { \
5640 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5641 iemOpStubMsg2(pVCpu); \
5642 RTAssertPanic(); \
5643 } while (0)
5644#else
5645# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5646#endif
5647
5648/** Stubs an opcode. */
5649#define FNIEMOP_STUB(a_Name) \
5650 FNIEMOP_DEF(a_Name) \
5651 { \
5652 RT_NOREF_PV(pVCpu); \
5653 IEMOP_BITCH_ABOUT_STUB(); \
5654 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5655 } \
5656 typedef int ignore_semicolon
5657
5658/** Stubs an opcode. */
5659#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5660 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5661 { \
5662 RT_NOREF_PV(pVCpu); \
5663 RT_NOREF_PV(a_Name0); \
5664 IEMOP_BITCH_ABOUT_STUB(); \
5665 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5666 } \
5667 typedef int ignore_semicolon
5668
5669/** Stubs an opcode which currently should raise \#UD. */
5670#define FNIEMOP_UD_STUB(a_Name) \
5671 FNIEMOP_DEF(a_Name) \
5672 { \
5673 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5674 return IEMOP_RAISE_INVALID_OPCODE(); \
5675 } \
5676 typedef int ignore_semicolon
5677
5678/** Stubs an opcode which currently should raise \#UD. */
5679#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5680 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5681 { \
5682 RT_NOREF_PV(pVCpu); \
5683 RT_NOREF_PV(a_Name0); \
5684 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5685 return IEMOP_RAISE_INVALID_OPCODE(); \
5686 } \
5687 typedef int ignore_semicolon
5688
5689
5690
5691/** @name Register Access.
5692 * @{
5693 */
5694
5695/**
5696 * Gets a reference (pointer) to the specified hidden segment register.
5697 *
5698 * @returns Hidden register reference.
5699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5700 * @param iSegReg The segment register.
5701 */
5702IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5703{
5704 Assert(iSegReg < X86_SREG_COUNT);
5705 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5706 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5707
5708#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5709 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5710 { /* likely */ }
5711 else
5712 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5713#else
5714 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5715#endif
5716 return pSReg;
5717}
5718
5719
5720/**
5721 * Ensures that the given hidden segment register is up to date.
5722 *
5723 * @returns Hidden register reference.
5724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5725 * @param pSReg The segment register.
5726 */
5727IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5728{
5729#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5730 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5731 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5732#else
5733 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5734 NOREF(pVCpu);
5735#endif
5736 return pSReg;
5737}
5738
5739
5740/**
5741 * Gets a reference (pointer) to the specified segment register (the selector
5742 * value).
5743 *
5744 * @returns Pointer to the selector variable.
5745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5746 * @param iSegReg The segment register.
5747 */
5748DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5749{
5750 Assert(iSegReg < X86_SREG_COUNT);
5751 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5752 return &pCtx->aSRegs[iSegReg].Sel;
5753}
5754
5755
5756/**
5757 * Fetches the selector value of a segment register.
5758 *
5759 * @returns The selector value.
5760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5761 * @param iSegReg The segment register.
5762 */
5763DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5764{
5765 Assert(iSegReg < X86_SREG_COUNT);
5766 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5767}
5768
5769
5770/**
5771 * Gets a reference (pointer) to the specified general purpose register.
5772 *
5773 * @returns Register reference.
5774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5775 * @param iReg The general purpose register.
5776 */
5777DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5778{
5779 Assert(iReg < 16);
5780 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5781 return &pCtx->aGRegs[iReg];
5782}
5783
5784
5785/**
5786 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5787 *
5788 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5789 *
5790 * @returns Register reference.
5791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5792 * @param iReg The register.
5793 */
5794DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5795{
5796 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5797 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5798 {
5799 Assert(iReg < 16);
5800 return &pCtx->aGRegs[iReg].u8;
5801 }
5802 /* high 8-bit register. */
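/* Without a REX prefix, encodings 4-7 select the legacy high byte registers
 AH, CH, DH and BH, hence the iReg & 3 indexing and the bHi (second byte) access. */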
5803 Assert(iReg < 8);
5804 return &pCtx->aGRegs[iReg & 3].bHi;
5805}
5806
5807
5808/**
5809 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5810 *
5811 * @returns Register reference.
5812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5813 * @param iReg The register.
5814 */
5815DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5816{
5817 Assert(iReg < 16);
5818 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5819 return &pCtx->aGRegs[iReg].u16;
5820}
5821
5822
5823/**
5824 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5825 *
5826 * @returns Register reference.
5827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5828 * @param iReg The register.
5829 */
5830DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5831{
5832 Assert(iReg < 16);
5833 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5834 return &pCtx->aGRegs[iReg].u32;
5835}
5836
5837
5838/**
5839 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5840 *
5841 * @returns Register reference.
5842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5843 * @param iReg The register.
5844 */
5845DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5846{
5847 Assert(iReg < 16);
5848 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5849 return &pCtx->aGRegs[iReg].u64;
5850}
5851
5852
5853/**
5854 * Fetches the value of an 8-bit general purpose register.
5855 *
5856 * @returns The register value.
5857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5858 * @param iReg The register.
5859 */
5860DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5861{
5862 return *iemGRegRefU8(pVCpu, iReg);
5863}
5864
5865
5866/**
5867 * Fetches the value of a 16-bit general purpose register.
5868 *
5869 * @returns The register value.
5870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5871 * @param iReg The register.
5872 */
5873DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5874{
5875 Assert(iReg < 16);
5876 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5877}
5878
5879
5880/**
5881 * Fetches the value of a 32-bit general purpose register.
5882 *
5883 * @returns The register value.
5884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5885 * @param iReg The register.
5886 */
5887DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5888{
5889 Assert(iReg < 16);
5890 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5891}
5892
5893
5894/**
5895 * Fetches the value of a 64-bit general purpose register.
5896 *
5897 * @returns The register value.
5898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5899 * @param iReg The register.
5900 */
5901DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5902{
5903 Assert(iReg < 16);
5904 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5905}
5906
5907
5908/**
5909 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5910 *
5911 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5912 * segment limit.
5913 *
5914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5915 * @param offNextInstr The offset of the next instruction.
5916 */
5917IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5918{
5919 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5920 switch (pVCpu->iem.s.enmEffOpSize)
5921 {
5922 case IEMMODE_16BIT:
5923 {
5924 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5925 if ( uNewIp > pCtx->cs.u32Limit
5926 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5927 return iemRaiseGeneralProtectionFault0(pVCpu);
5928 pCtx->rip = uNewIp;
5929 break;
5930 }
5931
5932 case IEMMODE_32BIT:
5933 {
5934 Assert(pCtx->rip <= UINT32_MAX);
5935 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5936
5937 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5938 if (uNewEip > pCtx->cs.u32Limit)
5939 return iemRaiseGeneralProtectionFault0(pVCpu);
5940 pCtx->rip = uNewEip;
5941 break;
5942 }
5943
5944 case IEMMODE_64BIT:
5945 {
5946 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5947
5948 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5949 if (!IEM_IS_CANONICAL(uNewRip))
5950 return iemRaiseGeneralProtectionFault0(pVCpu);
5951 pCtx->rip = uNewRip;
5952 break;
5953 }
5954
5955 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5956 }
5957
5958 pCtx->eflags.Bits.u1RF = 0;
5959
5960#ifndef IEM_WITH_CODE_TLB
5961 /* Flush the prefetch buffer. */
5962 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5963#endif
5964
5965 return VINF_SUCCESS;
5966}
5967
5968
5969/**
5970 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5971 *
5972 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5973 * segment limit.
5974 *
5975 * @returns Strict VBox status code.
5976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5977 * @param offNextInstr The offset of the next instruction.
5978 */
5979IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5980{
5981 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5982 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5983
5984 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5985 if ( uNewIp > pCtx->cs.u32Limit
5986 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5987 return iemRaiseGeneralProtectionFault0(pVCpu);
5988 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5989 pCtx->rip = uNewIp;
5990 pCtx->eflags.Bits.u1RF = 0;
5991
5992#ifndef IEM_WITH_CODE_TLB
5993 /* Flush the prefetch buffer. */
5994 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5995#endif
5996
5997 return VINF_SUCCESS;
5998}
5999
6000
6001/**
6002 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6003 *
6004 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6005 * segment limit.
6006 *
6007 * @returns Strict VBox status code.
6008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6009 * @param offNextInstr The offset of the next instruction.
6010 */
6011IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6012{
6013 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6014 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6015
6016 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6017 {
6018 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6019
6020 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6021 if (uNewEip > pCtx->cs.u32Limit)
6022 return iemRaiseGeneralProtectionFault0(pVCpu);
6023 pCtx->rip = uNewEip;
6024 }
6025 else
6026 {
6027 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6028
6029 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6030 if (!IEM_IS_CANONICAL(uNewRip))
6031 return iemRaiseGeneralProtectionFault0(pVCpu);
6032 pCtx->rip = uNewRip;
6033 }
6034 pCtx->eflags.Bits.u1RF = 0;
6035
6036#ifndef IEM_WITH_CODE_TLB
6037 /* Flush the prefetch buffer. */
6038 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6039#endif
6040
6041 return VINF_SUCCESS;
6042}
6043
6044
6045/**
6046 * Performs a near jump to the specified address.
6047 *
6048 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6049 * segment limit.
6050 *
6051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6052 * @param uNewRip The new RIP value.
6053 */
6054IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6055{
6056 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6057 switch (pVCpu->iem.s.enmEffOpSize)
6058 {
6059 case IEMMODE_16BIT:
6060 {
6061 Assert(uNewRip <= UINT16_MAX);
6062 if ( uNewRip > pCtx->cs.u32Limit
6063 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6064 return iemRaiseGeneralProtectionFault0(pVCpu);
6065 /** @todo Test 16-bit jump in 64-bit mode. */
6066 pCtx->rip = uNewRip;
6067 break;
6068 }
6069
6070 case IEMMODE_32BIT:
6071 {
6072 Assert(uNewRip <= UINT32_MAX);
6073 Assert(pCtx->rip <= UINT32_MAX);
6074 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6075
6076 if (uNewRip > pCtx->cs.u32Limit)
6077 return iemRaiseGeneralProtectionFault0(pVCpu);
6078 pCtx->rip = uNewRip;
6079 break;
6080 }
6081
6082 case IEMMODE_64BIT:
6083 {
6084 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6085
6086 if (!IEM_IS_CANONICAL(uNewRip))
6087 return iemRaiseGeneralProtectionFault0(pVCpu);
6088 pCtx->rip = uNewRip;
6089 break;
6090 }
6091
6092 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6093 }
6094
6095 pCtx->eflags.Bits.u1RF = 0;
6096
6097#ifndef IEM_WITH_CODE_TLB
6098 /* Flush the prefetch buffer. */
6099 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6100#endif
6101
6102 return VINF_SUCCESS;
6103}
6104
6105
6106/**
6107 * Gets the address of the top of the stack.
6108 *
6109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6110 * @param pCtx The CPU context from which SP/ESP/RSP should be
6111 * read.
6112 */
6113DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6114{
6115 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6116 return pCtx->rsp;
6117 if (pCtx->ss.Attr.n.u1DefBig)
6118 return pCtx->esp;
6119 return pCtx->sp;
6120}
6121
6122
6123/**
6124 * Updates the RIP/EIP/IP to point to the next instruction.
6125 *
6126 * This function leaves the EFLAGS.RF flag alone.
6127 *
6128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6129 * @param cbInstr The number of bytes to add.
6130 */
6131IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6132{
6133 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6134 switch (pVCpu->iem.s.enmCpuMode)
6135 {
6136 case IEMMODE_16BIT:
6137 Assert(pCtx->rip <= UINT16_MAX);
6138 pCtx->eip += cbInstr;
6139 pCtx->eip &= UINT32_C(0xffff);
6140 break;
6141
6142 case IEMMODE_32BIT:
6143 pCtx->eip += cbInstr;
6144 Assert(pCtx->rip <= UINT32_MAX);
6145 break;
6146
6147 case IEMMODE_64BIT:
6148 pCtx->rip += cbInstr;
6149 break;
6150 default: AssertFailed();
6151 }
6152}
6153
6154
6155#if 0
6156/**
6157 * Updates the RIP/EIP/IP to point to the next instruction.
6158 *
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 */
6161IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6162{
6163 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6164}
6165#endif
6166
6167
6168
6169/**
6170 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6171 *
6172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6173 * @param cbInstr The number of bytes to add.
6174 */
6175IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6176{
6177 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6178
6179 pCtx->eflags.Bits.u1RF = 0;
6180
6181 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6182#if ARCH_BITS >= 64
6183 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6184 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6185 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6186#else
6187 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6188 pCtx->rip += cbInstr;
6189 else
6190 {
6191 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6192 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6193 }
6194#endif
6195}
6196
6197
6198/**
6199 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6200 *
6201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6202 */
6203IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6204{
6205 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6206}
6207
6208
6209/**
6210 * Adds to the stack pointer.
6211 *
6212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6213 * @param pCtx The CPU context whose SP/ESP/RSP should be
6214 * updated.
6215 * @param cbToAdd The number of bytes to add (8-bit!).
6216 */
6217DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6218{
6219 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6220 pCtx->rsp += cbToAdd;
6221 else if (pCtx->ss.Attr.n.u1DefBig)
6222 pCtx->esp += cbToAdd;
6223 else
6224 pCtx->sp += cbToAdd;
6225}
6226
6227
6228/**
6229 * Subtracts from the stack pointer.
6230 *
6231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6232 * @param pCtx The CPU context whose SP/ESP/RSP should be
6233 * updated.
6234 * @param cbToSub The number of bytes to subtract (8-bit!).
6235 */
6236DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6237{
6238 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6239 pCtx->rsp -= cbToSub;
6240 else if (pCtx->ss.Attr.n.u1DefBig)
6241 pCtx->esp -= cbToSub;
6242 else
6243 pCtx->sp -= cbToSub;
6244}
6245
6246
6247/**
6248 * Adds to the temporary stack pointer.
6249 *
6250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6251 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6252 * @param cbToAdd The number of bytes to add (16-bit).
6253 * @param pCtx Where to get the current stack mode.
6254 */
6255DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6256{
6257 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6258 pTmpRsp->u += cbToAdd;
6259 else if (pCtx->ss.Attr.n.u1DefBig)
6260 pTmpRsp->DWords.dw0 += cbToAdd;
6261 else
6262 pTmpRsp->Words.w0 += cbToAdd;
6263}
6264
6265
6266/**
6267 * Subtracts from the temporary stack pointer.
6268 *
6269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6270 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6271 * @param cbToSub The number of bytes to subtract.
6272 * @param pCtx Where to get the current stack mode.
6273 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6274 * expecting that.
6275 */
6276DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6277{
6278 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6279 pTmpRsp->u -= cbToSub;
6280 else if (pCtx->ss.Attr.n.u1DefBig)
6281 pTmpRsp->DWords.dw0 -= cbToSub;
6282 else
6283 pTmpRsp->Words.w0 -= cbToSub;
6284}
6285
6286
6287/**
6288 * Calculates the effective stack address for a push of the specified size as
6289 * well as the new RSP value (upper bits may be masked).
6290 *
6291 * @returns Effective stack address for the push.
6292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6293 * @param pCtx Where to get the current stack mode.
6294 * @param cbItem The size of the stack item to push.
6295 * @param puNewRsp Where to return the new RSP value.
6296 */
6297DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6298{
6299 RTUINT64U uTmpRsp;
6300 RTGCPTR GCPtrTop;
6301 uTmpRsp.u = pCtx->rsp;
6302
6303 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6304 GCPtrTop = uTmpRsp.u -= cbItem;
6305 else if (pCtx->ss.Attr.n.u1DefBig)
6306 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6307 else
6308 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6309 *puNewRsp = uTmpRsp.u;
6310 return GCPtrTop;
6311}
6312
6313
6314/**
6315 * Gets the current stack pointer and calculates the value after a pop of the
6316 * specified size.
6317 *
6318 * @returns Current stack pointer.
6319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6320 * @param pCtx Where to get the current stack mode.
6321 * @param cbItem The size of the stack item to pop.
6322 * @param puNewRsp Where to return the new RSP value.
6323 */
6324DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6325{
6326 RTUINT64U uTmpRsp;
6327 RTGCPTR GCPtrTop;
6328 uTmpRsp.u = pCtx->rsp;
6329
6330 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6331 {
6332 GCPtrTop = uTmpRsp.u;
6333 uTmpRsp.u += cbItem;
6334 }
6335 else if (pCtx->ss.Attr.n.u1DefBig)
6336 {
6337 GCPtrTop = uTmpRsp.DWords.dw0;
6338 uTmpRsp.DWords.dw0 += cbItem;
6339 }
6340 else
6341 {
6342 GCPtrTop = uTmpRsp.Words.w0;
6343 uTmpRsp.Words.w0 += cbItem;
6344 }
6345 *puNewRsp = uTmpRsp.u;
6346 return GCPtrTop;
6347}
6348
6349
6350/**
6351 * Calculates the effective stack address for a push of the specified size as
6352 * well as the new temporary RSP value (upper bits may be masked).
6353 *
6354 * @returns Effective stack address for the push.
6355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6356 * @param pCtx Where to get the current stack mode.
6357 * @param pTmpRsp The temporary stack pointer. This is updated.
6358 * @param cbItem The size of the stack item to push.
6359 */
6360DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6361{
6362 RTGCPTR GCPtrTop;
6363
6364 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6365 GCPtrTop = pTmpRsp->u -= cbItem;
6366 else if (pCtx->ss.Attr.n.u1DefBig)
6367 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6368 else
6369 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6370 return GCPtrTop;
6371}
6372
6373
6374/**
6375 * Gets the effective stack address for a pop of the specified size and
6376 * calculates and updates the temporary RSP.
6377 *
6378 * @returns Current stack pointer.
6379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6380 * @param pCtx Where to get the current stack mode.
6381 * @param pTmpRsp The temporary stack pointer. This is updated.
6382 * @param cbItem The size of the stack item to pop.
6383 */
6384DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6385{
6386 RTGCPTR GCPtrTop;
6387 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6388 {
6389 GCPtrTop = pTmpRsp->u;
6390 pTmpRsp->u += cbItem;
6391 }
6392 else if (pCtx->ss.Attr.n.u1DefBig)
6393 {
6394 GCPtrTop = pTmpRsp->DWords.dw0;
6395 pTmpRsp->DWords.dw0 += cbItem;
6396 }
6397 else
6398 {
6399 GCPtrTop = pTmpRsp->Words.w0;
6400 pTmpRsp->Words.w0 += cbItem;
6401 }
6402 return GCPtrTop;
6403}
6404
6405/** @} */
6406
6407
6408/** @name FPU access and helpers.
6409 *
6410 * @{
6411 */
6412
6413
6414/**
6415 * Hook for preparing to use the host FPU.
6416 *
6417 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6418 *
6419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6420 */
6421DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6422{
6423#ifdef IN_RING3
6424 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6425#else
6426 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6427#endif
6428}
6429
6430
6431 * Hook for preparing to use the host FPU for SSE.
6432 * Hook for preparing to use the host FPU for SSE
6433 *
6434 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6435 *
6436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6437 */
6438DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6439{
6440 iemFpuPrepareUsage(pVCpu);
6441}
6442
6443
6444/**
6445 * Hook for actualizing the guest FPU state before the interpreter reads it.
6446 *
6447 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6448 *
6449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6450 */
6451DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6452{
6453#ifdef IN_RING3
6454 NOREF(pVCpu);
6455#else
6456 CPUMRZFpuStateActualizeForRead(pVCpu);
6457#endif
6458}
6459
6460
6461/**
6462 * Hook for actualizing the guest FPU state before the interpreter changes it.
6463 *
6464 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6465 *
6466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6467 */
6468DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6469{
6470#ifdef IN_RING3
6471 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6472#else
6473 CPUMRZFpuStateActualizeForChange(pVCpu);
6474#endif
6475}
6476
6477
6478/**
6479 * Hook for actualizing the guest XMM0..15 register state for read only.
6480 *
6481 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6482 *
6483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6484 */
6485DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6486{
6487#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6488 NOREF(pVCpu);
6489#else
6490 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6491#endif
6492}
6493
6494
6495/**
6496 * Hook for actualizing the guest XMM0..15 register state for read+write.
6497 *
6498 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6499 *
6500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6501 */
6502DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6503{
6504#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6505 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6506#else
6507 CPUMRZFpuStateActualizeForChange(pVCpu);
6508#endif
6509}
6510
6511
6512/**
6513 * Stores a QNaN value into a FPU register.
6514 *
6515 * @param pReg Pointer to the register.
6516 */
6517DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6518{
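    /* The x87 real-indefinite QNaN: sign=1, exponent=0x7fff, mantissa=0xc000000000000000. */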
6519 pReg->au32[0] = UINT32_C(0x00000000);
6520 pReg->au32[1] = UINT32_C(0xc0000000);
6521 pReg->au16[4] = UINT16_C(0xffff);
6522}
6523
6524
6525/**
6526 * Updates the FOP, FPU.CS and FPUIP registers.
6527 *
6528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6529 * @param pCtx The CPU context.
6530 * @param pFpuCtx The FPU context.
6531 */
6532DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6533{
6534 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6535 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6536 /** @todo x87.CS and FPUIP need to be kept separately. */
6537 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6538 {
6539 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
6540 * are handled in real mode, based on the fnsave and fnstenv images. */
6541 pFpuCtx->CS = 0;
6542 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6543 }
6544 else
6545 {
6546 pFpuCtx->CS = pCtx->cs.Sel;
6547 pFpuCtx->FPUIP = pCtx->rip;
6548 }
6549}
6550
6551
6552/**
6553 * Updates the x87.DS and FPUDP registers.
6554 *
6555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6556 * @param pCtx The CPU context.
6557 * @param pFpuCtx The FPU context.
6558 * @param iEffSeg The effective segment register.
6559 * @param GCPtrEff The effective address relative to @a iEffSeg.
6560 */
6561DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6562{
6563 RTSEL sel;
6564 switch (iEffSeg)
6565 {
6566 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6567 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6568 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6569 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6570 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6571 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6572 default:
6573 AssertMsgFailed(("%d\n", iEffSeg));
6574 sel = pCtx->ds.Sel;
6575 }
6576 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6577 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6578 {
6579 pFpuCtx->DS = 0;
6580 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6581 }
6582 else
6583 {
6584 pFpuCtx->DS = sel;
6585 pFpuCtx->FPUDP = GCPtrEff;
6586 }
6587}
6588
6589
6590/**
6591 * Rotates the stack registers in the push direction.
6592 *
6593 * @param pFpuCtx The FPU context.
6594 * @remarks This is a complete waste of time, but fxsave stores the registers in
6595 * stack order.
6596 */
6597DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6598{
6599 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6600 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6601 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6602 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6603 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6604 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6605 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6606 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6607 pFpuCtx->aRegs[0].r80 = r80Tmp;
6608}
6609
6610
6611/**
6612 * Rotates the stack registers in the pop direction.
6613 *
6614 * @param pFpuCtx The FPU context.
6615 * @remarks This is a complete waste of time, but fxsave stores the registers in
6616 * stack order.
6617 */
6618DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6619{
6620 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6621 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6622 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6623 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6624 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6625 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6626 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6627 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6628 pFpuCtx->aRegs[7].r80 = r80Tmp;
6629}
6630
6631
6632/**
6633 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6634 * exception prevents it.
6635 *
6636 * @param pResult The FPU operation result to push.
6637 * @param pFpuCtx The FPU context.
6638 */
6639IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6640{
6641 /* Update FSW and bail if there are pending exceptions afterwards. */
6642 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6643 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6644 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6645 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6646 {
6647 pFpuCtx->FSW = fFsw;
6648 return;
6649 }
6650
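    /* New TOP for the push: (TOP + 7) & 7 equals TOP - 1 modulo 8, since a push
       moves the top of the stack to the next lower register number. */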
6651 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6652 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6653 {
6654 /* All is fine, push the actual value. */
6655 pFpuCtx->FTW |= RT_BIT(iNewTop);
6656 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6657 }
6658 else if (pFpuCtx->FCW & X86_FCW_IM)
6659 {
6660 /* Masked stack overflow, push QNaN. */
6661 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6662 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6663 }
6664 else
6665 {
6666 /* Raise stack overflow, don't push anything. */
6667 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6668 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6669 return;
6670 }
6671
6672 fFsw &= ~X86_FSW_TOP_MASK;
6673 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6674 pFpuCtx->FSW = fFsw;
6675
6676 iemFpuRotateStackPush(pFpuCtx);
6677}
6678
6679
6680/**
6681 * Stores a result in a FPU register and updates the FSW and FTW.
6682 *
6683 * @param pFpuCtx The FPU context.
6684 * @param pResult The result to store.
6685 * @param iStReg Which FPU register to store it in.
6686 */
6687IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6688{
6689 Assert(iStReg < 8);
6690 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6691 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6692 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6693 pFpuCtx->FTW |= RT_BIT(iReg);
6694 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6695}
6696
6697
6698/**
6699 * Only updates the FPU status word (FSW) with the result of the current
6700 * instruction.
6701 *
6702 * @param pFpuCtx The FPU context.
6703 * @param u16FSW The FSW output of the current instruction.
6704 */
6705IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6706{
6707 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6708 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6709}
6710
6711
6712/**
6713 * Pops one item off the FPU stack if no pending exception prevents it.
6714 *
6715 * @param pFpuCtx The FPU context.
6716 */
6717IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6718{
6719 /* Check pending exceptions. */
6720 uint16_t uFSW = pFpuCtx->FSW;
6721 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6722 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6723 return;
6724
6725 /* TOP++ (a pop raises TOP; adding 9 is +1 modulo 8 within the masked field). */
6726 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6727 uFSW &= ~X86_FSW_TOP_MASK;
6728 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6729 pFpuCtx->FSW = uFSW;
6730
6731 /* Mark the previous ST0 as empty. */
6732 iOldTop >>= X86_FSW_TOP_SHIFT;
6733 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6734
6735 /* Rotate the registers. */
6736 iemFpuRotateStackPop(pFpuCtx);
6737}
6738
6739
6740/**
6741 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6742 *
6743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6744 * @param pResult The FPU operation result to push.
6745 */
6746IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6747{
6748 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6749 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6750 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6751 iemFpuMaybePushResult(pResult, pFpuCtx);
6752}
6753
6754
6755/**
6756 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6757 * and sets FPUDP and FPUDS.
6758 *
6759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6760 * @param pResult The FPU operation result to push.
6761 * @param iEffSeg The effective segment register.
6762 * @param GCPtrEff The effective address relative to @a iEffSeg.
6763 */
6764IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6765{
6766 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6767 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6768 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6769 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6770 iemFpuMaybePushResult(pResult, pFpuCtx);
6771}
6772
6773
6774/**
6775 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6776 * unless a pending exception prevents it.
6777 *
6778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6779 * @param pResult The FPU operation result to store and push.
6780 */
6781IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6782{
6783 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6784 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6785 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6786
6787 /* Update FSW and bail if there are pending exceptions afterwards. */
6788 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6789 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6790 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6791 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6792 {
6793 pFpuCtx->FSW = fFsw;
6794 return;
6795 }
6796
6797 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6798 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6799 {
6800 /* All is fine, push the actual value. */
6801 pFpuCtx->FTW |= RT_BIT(iNewTop);
6802 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6803 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6804 }
6805 else if (pFpuCtx->FCW & X86_FCW_IM)
6806 {
6807 /* Masked stack overflow, push QNaN. */
6808 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6809 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6810 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6811 }
6812 else
6813 {
6814 /* Raise stack overflow, don't push anything. */
6815 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6816 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6817 return;
6818 }
6819
6820 fFsw &= ~X86_FSW_TOP_MASK;
6821 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6822 pFpuCtx->FSW = fFsw;
6823
6824 iemFpuRotateStackPush(pFpuCtx);
6825}
6826
6827
6828/**
6829 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6830 * FOP.
6831 *
6832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6833 * @param pResult The result to store.
6834 * @param iStReg Which FPU register to store it in.
6835 */
6836IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6837{
6838 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6839 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6840 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6841 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6842}
6843
6844
6845/**
6846 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6847 * FOP, and then pops the stack.
6848 *
6849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6850 * @param pResult The result to store.
6851 * @param iStReg Which FPU register to store it in.
6852 */
6853IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6854{
6855 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6856 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6857 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6858 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6859 iemFpuMaybePopOne(pFpuCtx);
6860}
6861
6862
6863/**
6864 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6865 * FPUDP, and FPUDS.
6866 *
6867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6868 * @param pResult The result to store.
6869 * @param iStReg Which FPU register to store it in.
6870 * @param iEffSeg The effective memory operand selector register.
6871 * @param GCPtrEff The effective memory operand offset.
6872 */
6873IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6874 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6875{
6876 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6877 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6878 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6879 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6880 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6881}
6882
6883
6884/**
6885 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6886 * FPUDP, and FPUDS, and then pops the stack.
6887 *
6888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6889 * @param pResult The result to store.
6890 * @param iStReg Which FPU register to store it in.
6891 * @param iEffSeg The effective memory operand selector register.
6892 * @param GCPtrEff The effective memory operand offset.
6893 */
6894IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6895 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6896{
6897 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6898 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6899 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6900 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6901 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6902 iemFpuMaybePopOne(pFpuCtx);
6903}
6904
6905
6906/**
6907 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6908 *
6909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6910 */
6911IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6912{
6913 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6914 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6915 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6916}
6917
6918
6919/**
6920 * Marks the specified stack register as free (for FFREE).
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 * @param iStReg The register to free.
6924 */
6925IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6926{
6927 Assert(iStReg < 8);
6928 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6929 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6930 pFpuCtx->FTW &= ~RT_BIT(iReg);
6931}
6932
6933
6934/**
6935 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6936 *
6937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6938 */
6939IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6940{
6941 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6942 uint16_t uFsw = pFpuCtx->FSW;
6943 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6944 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6945 uFsw &= ~X86_FSW_TOP_MASK;
6946 uFsw |= uTop;
6947 pFpuCtx->FSW = uFsw;
6948}
6949
6950
6951/**
6952 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6953 *
6954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6955 */
6956IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6957{
6958 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6959 uint16_t uFsw = pFpuCtx->FSW;
6960 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
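    /* Adding 7 modulo 8 decrements TOP by one, i.e. the push direction. */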
6961 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6962 uFsw &= ~X86_FSW_TOP_MASK;
6963 uFsw |= uTop;
6964 pFpuCtx->FSW = uFsw;
6965}
6966
6967
6968/**
6969 * Updates the FSW, FOP, FPUIP, and FPUCS.
6970 *
6971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6972 * @param u16FSW The FSW from the current instruction.
6973 */
6974IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6975{
6976 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6977 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6978 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6979 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6980}
6981
6982
6983/**
6984 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6985 *
6986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6987 * @param u16FSW The FSW from the current instruction.
6988 */
6989IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6990{
6991 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6992 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6993 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6994 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6995 iemFpuMaybePopOne(pFpuCtx);
6996}
6997
6998
6999/**
7000 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7001 *
7002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7003 * @param u16FSW The FSW from the current instruction.
7004 * @param iEffSeg The effective memory operand selector register.
7005 * @param GCPtrEff The effective memory operand offset.
7006 */
7007IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7008{
7009 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7010 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7011 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7012 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7013 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7014}
7015
7016
7017/**
7018 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7019 *
7020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7021 * @param u16FSW The FSW from the current instruction.
7022 */
7023IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7024{
7025 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7026 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7027 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7028 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7029 iemFpuMaybePopOne(pFpuCtx);
7030 iemFpuMaybePopOne(pFpuCtx);
7031}
7032
7033
7034/**
7035 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7036 *
7037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7038 * @param u16FSW The FSW from the current instruction.
7039 * @param iEffSeg The effective memory operand selector register.
7040 * @param GCPtrEff The effective memory operand offset.
7041 */
7042IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7043{
7044 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7045 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7046 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7047 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7048 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7049 iemFpuMaybePopOne(pFpuCtx);
7050}
7051
7052
7053/**
7054 * Worker routine for raising an FPU stack underflow exception.
7055 *
7056 * @param pFpuCtx The FPU context.
7057 * @param iStReg The stack register being accessed.
7058 */
7059IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7060{
7061 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7062 if (pFpuCtx->FCW & X86_FCW_IM)
7063 {
7064 /* Masked underflow. */
7065 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7066 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7067 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7068 if (iStReg != UINT8_MAX)
7069 {
7070 pFpuCtx->FTW |= RT_BIT(iReg);
7071 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7072 }
7073 }
7074 else
7075 {
7076 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7077 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7078 }
7079}
7080
7081
7082/**
7083 * Raises a FPU stack underflow exception.
7084 *
7085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7086 * @param iStReg The destination register that should be loaded
7087 * with QNaN if \#IS is not masked. Specify
7088 * UINT8_MAX if none (like for fcom).
7089 */
7090DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7091{
7092 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7093 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7094 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7095 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7096}
7097
7098
7099DECL_NO_INLINE(IEM_STATIC, void)
7100iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7101{
7102 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7103 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7104 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7105 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7106 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7107}
7108
7109
7110DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7111{
7112 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7113 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7114 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7115 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7116 iemFpuMaybePopOne(pFpuCtx);
7117}
7118
7119
7120DECL_NO_INLINE(IEM_STATIC, void)
7121iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7122{
7123 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7124 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7125 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7126 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7127 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7128 iemFpuMaybePopOne(pFpuCtx);
7129}
7130
7131
7132DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7133{
7134 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7135 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7136 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7137 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7138 iemFpuMaybePopOne(pFpuCtx);
7139 iemFpuMaybePopOne(pFpuCtx);
7140}
7141
7142
7143DECL_NO_INLINE(IEM_STATIC, void)
7144iemFpuStackPushUnderflow(PVMCPU pVCpu)
7145{
7146 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7147 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7148 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7149
7150 if (pFpuCtx->FCW & X86_FCW_IM)
7151 {
7152 /* Masked underflow - Push QNaN. */
7153 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7154 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7155 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7156 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7157 pFpuCtx->FTW |= RT_BIT(iNewTop);
7158 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7159 iemFpuRotateStackPush(pFpuCtx);
7160 }
7161 else
7162 {
7163 /* Exception pending - don't change TOP or the register stack. */
7164 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7165 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7166 }
7167}
7168
7169
7170DECL_NO_INLINE(IEM_STATIC, void)
7171iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7172{
7173 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7174 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7175 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7176
7177 if (pFpuCtx->FCW & X86_FCW_IM)
7178 {
7179 /* Masked underflow - Push QNaN. */
7180 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7181 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7182 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7183 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7184 pFpuCtx->FTW |= RT_BIT(iNewTop);
7185 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7186 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7187 iemFpuRotateStackPush(pFpuCtx);
7188 }
7189 else
7190 {
7191 /* Exception pending - don't change TOP or the register stack. */
7192 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7193 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7194 }
7195}
7196
7197
7198/**
7199 * Worker routine for raising an FPU stack overflow exception on a push.
7200 *
7201 * @param pFpuCtx The FPU context.
7202 */
7203IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7204{
7205 if (pFpuCtx->FCW & X86_FCW_IM)
7206 {
7207 /* Masked overflow. */
7208 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7209 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7210 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7211 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7212 pFpuCtx->FTW |= RT_BIT(iNewTop);
7213 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7214 iemFpuRotateStackPush(pFpuCtx);
7215 }
7216 else
7217 {
7218 /* Exception pending - don't change TOP or the register stack. */
7219 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7220 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7221 }
7222}
7223
7224
7225/**
7226 * Raises a FPU stack overflow exception on a push.
7227 *
7228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7229 */
7230DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7231{
7232 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7233 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7234 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7235 iemFpuStackPushOverflowOnly(pFpuCtx);
7236}
7237
7238
7239/**
7240 * Raises a FPU stack overflow exception on a push with a memory operand.
7241 *
7242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7243 * @param iEffSeg The effective memory operand selector register.
7244 * @param GCPtrEff The effective memory operand offset.
7245 */
7246DECL_NO_INLINE(IEM_STATIC, void)
7247iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7248{
7249 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7250 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7251 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7252 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7253 iemFpuStackPushOverflowOnly(pFpuCtx);
7254}
7255
7256
7257IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7258{
7259 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7260 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7261 if (pFpuCtx->FTW & RT_BIT(iReg))
7262 return VINF_SUCCESS;
7263 return VERR_NOT_FOUND;
7264}
7265
7266
7267IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7268{
7269 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7270 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7271 if (pFpuCtx->FTW & RT_BIT(iReg))
7272 {
7273 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7274 return VINF_SUCCESS;
7275 }
7276 return VERR_NOT_FOUND;
7277}
7278
7279
7280IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7281 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7282{
7283 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7284 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7285 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7286 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7287 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7288 {
7289 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7290 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7291 return VINF_SUCCESS;
7292 }
7293 return VERR_NOT_FOUND;
7294}
7295
7296
7297IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7298{
7299 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7300 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7301 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7302 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7303 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7304 {
7305 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7306 return VINF_SUCCESS;
7307 }
7308 return VERR_NOT_FOUND;
7309}
7310
7311
7312/**
7313 * Updates the FPU exception status after FCW is changed.
7314 *
7315 * @param pFpuCtx The FPU context.
7316 */
7317IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7318{
7319 uint16_t u16Fsw = pFpuCtx->FSW;
7320 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7321 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7322 else
7323 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7324 pFpuCtx->FSW = u16Fsw;
7325}
7326
7327
7328/**
7329 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7330 *
7331 * @returns The full FTW.
7332 * @param pFpuCtx The FPU context.
7333 */
7334IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7335{
7336 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7337 uint16_t u16Ftw = 0;
7338 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
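    /* Full tag word encoding per register: 00=valid, 01=zero, 10=special
       (NaN, infinity, denormal, unnormal), 11=empty. */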
7339 for (unsigned iSt = 0; iSt < 8; iSt++)
7340 {
7341 unsigned const iReg = (iSt + iTop) & 7;
7342 if (!(u8Ftw & RT_BIT(iReg)))
7343 u16Ftw |= 3 << (iReg * 2); /* empty */
7344 else
7345 {
7346 uint16_t uTag;
7347 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7348 if (pr80Reg->s.uExponent == 0x7fff)
7349 uTag = 2; /* Exponent is all 1's => Special. */
7350 else if (pr80Reg->s.uExponent == 0x0000)
7351 {
7352 if (pr80Reg->s.u64Mantissa == 0x0000)
7353 uTag = 1; /* All bits are zero => Zero. */
7354 else
7355 uTag = 2; /* Must be special. */
7356 }
7357 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7358 uTag = 0; /* Valid. */
7359 else
7360 uTag = 2; /* Must be special. */
7361
7362 u16Ftw |= uTag << (iReg * 2);
7363 }
7364 }
7365
7366 return u16Ftw;
7367}
7368
7369
7370/**
7371 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7372 *
7373 * @returns The compressed FTW.
7374 * @param u16FullFtw The full FTW to convert.
7375 */
7376IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7377{
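    /* Each register whose two-bit tag is anything but 11 (empty) gets its bit
       set in the abridged (fxsave-style) tag word. */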
7378 uint8_t u8Ftw = 0;
7379 for (unsigned i = 0; i < 8; i++)
7380 {
7381 if ((u16FullFtw & 3) != 3 /*empty*/)
7382 u8Ftw |= RT_BIT(i);
7383 u16FullFtw >>= 2;
7384 }
7385
7386 return u8Ftw;
7387}
7388
7389/** @} */
7390
7391
7392/** @name Memory access.
7393 *
7394 * @{
7395 */
7396
7397
7398/**
7399 * Updates the IEMCPU::cbWritten counter if applicable.
7400 *
7401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7402 * @param fAccess The access being accounted for.
7403 * @param cbMem The access size.
7404 */
7405DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7406{
7407 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7408 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7409 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7410}
7411
7412
7413/**
7414 * Checks if the given segment can be written to, raising the appropriate
7415 * exception if not.
7416 *
7417 * @returns VBox strict status code.
7418 *
7419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7420 * @param pHid Pointer to the hidden register.
7421 * @param iSegReg The register number.
7422 * @param pu64BaseAddr Where to return the base address to use for the
7423 * segment. (In 64-bit code it may differ from the
7424 * base in the hidden segment.)
7425 */
7426IEM_STATIC VBOXSTRICTRC
7427iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7428{
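    /* In 64-bit mode only FS and GS contribute a non-zero base; CS, DS, ES and
       SS are treated as flat. */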
7429 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7430 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7431 else
7432 {
7433 if (!pHid->Attr.n.u1Present)
7434 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7435
7436 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7437 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7438 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7439 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7440 *pu64BaseAddr = pHid->u64Base;
7441 }
7442 return VINF_SUCCESS;
7443}
7444
7445
7446/**
7447 * Checks if the given segment can be read from, raising the appropriate
7448 * exception if not.
7449 *
7450 * @returns VBox strict status code.
7451 *
7452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7453 * @param pHid Pointer to the hidden register.
7454 * @param iSegReg The register number.
7455 * @param pu64BaseAddr Where to return the base address to use for the
7456 * segment. (In 64-bit code it may differ from the
7457 * base in the hidden segment.)
7458 */
7459IEM_STATIC VBOXSTRICTRC
7460iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7461{
7462 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7463 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7464 else
7465 {
7466 if (!pHid->Attr.n.u1Present)
7467 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7468
7469 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7470 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7471 *pu64BaseAddr = pHid->u64Base;
7472 }
7473 return VINF_SUCCESS;
7474}
7475
7476
7477/**
7478 * Applies the segment limit, base and attributes.
7479 *
7480 * This may raise a \#GP or \#SS.
7481 *
7482 * @returns VBox strict status code.
7483 *
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 * @param fAccess The kind of access which is being performed.
7486 * @param iSegReg The index of the segment register to apply.
7487 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7488 * TSS, ++).
7489 * @param cbMem The access size.
7490 * @param pGCPtrMem Pointer to the guest memory address to apply
7491 * segmentation to. Input and output parameter.
7492 */
7493IEM_STATIC VBOXSTRICTRC
7494iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7495{
7496 if (iSegReg == UINT8_MAX)
7497 return VINF_SUCCESS;
7498
7499 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7500 switch (pVCpu->iem.s.enmCpuMode)
7501 {
7502 case IEMMODE_16BIT:
7503 case IEMMODE_32BIT:
7504 {
7505 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7506 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7507
7508 if ( pSel->Attr.n.u1Present
7509 && !pSel->Attr.n.u1Unusable)
7510 {
7511 Assert(pSel->Attr.n.u1DescType);
7512 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7513 {
7514 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7515 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7516 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7517
7518 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7519 {
7520 /** @todo CPL check. */
7521 }
7522
7523 /*
7524 * There are two kinds of data selectors, normal and expand down.
7525 */
7526 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7527 {
7528 if ( GCPtrFirst32 > pSel->u32Limit
7529 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7530 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7531 }
7532 else
7533 {
7534 /*
7535 * The upper boundary is defined by the B bit, not the G bit!
7536 */
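                    /* For an expand-down segment the valid range is (limit, upper bound],
                       so accesses at or below the limit must fault. */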
7537 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7538 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7539 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7540 }
7541 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7542 }
7543 else
7544 {
7545
7546 /*
7547 * A code selector can usually be used to read through it; writing is
7548 * only permitted in real and V8086 mode.
7549 */
7550 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7551 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7552 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7553 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7554 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7555
7556 if ( GCPtrFirst32 > pSel->u32Limit
7557 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7558 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7559
7560 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7561 {
7562 /** @todo CPL check. */
7563 }
7564
7565 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7566 }
7567 }
7568 else
7569 return iemRaiseGeneralProtectionFault0(pVCpu);
7570 return VINF_SUCCESS;
7571 }
7572
7573 case IEMMODE_64BIT:
7574 {
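            /* Long mode: only the FS and GS bases apply, and the resulting linear
               address must be canonical. */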
7575 RTGCPTR GCPtrMem = *pGCPtrMem;
7576 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7577 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7578
7579 Assert(cbMem >= 1);
7580 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7581 return VINF_SUCCESS;
7582 return iemRaiseGeneralProtectionFault0(pVCpu);
7583 }
7584
7585 default:
7586 AssertFailedReturn(VERR_IEM_IPE_7);
7587 }
7588}
7589
7590
7591/**
7592 * Translates a virtual address to a physical address and checks if we
7593 * can access the page as specified.
7594 *
7595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7596 * @param GCPtrMem The virtual address.
7597 * @param fAccess The intended access.
7598 * @param pGCPhysMem Where to return the physical address.
7599 */
7600IEM_STATIC VBOXSTRICTRC
7601iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7602{
7603 /** @todo Need a different PGM interface here. We're currently using
7604 * generic / REM interfaces. This won't cut it for R0 & RC. */
7605 RTGCPHYS GCPhys;
7606 uint64_t fFlags;
7607 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7608 if (RT_FAILURE(rc))
7609 {
7610 /** @todo Check unassigned memory in unpaged mode. */
7611 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7612 *pGCPhysMem = NIL_RTGCPHYS;
7613 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7614 }
7615
7616 /* If the page is writable and does not have the no-exec bit set, all
7617 access is allowed. Otherwise we'll have to check more carefully... */
7618 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7619 {
7620 /* Write to read only memory? */
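        /* (Supervisor code may write to read-only pages while CR0.WP is clear.) */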
7621 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7622 && !(fFlags & X86_PTE_RW)
7623 && ( pVCpu->iem.s.uCpl == 3
7624 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7625 {
7626 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7627 *pGCPhysMem = NIL_RTGCPHYS;
7628 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7629 }
7630
7631 /* Kernel memory accessed by userland? */
7632 if ( !(fFlags & X86_PTE_US)
7633 && pVCpu->iem.s.uCpl == 3
7634 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7635 {
7636 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7637 *pGCPhysMem = NIL_RTGCPHYS;
7638 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7639 }
7640
7641 /* Executing non-executable memory? */
7642 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7643 && (fFlags & X86_PTE_PAE_NX)
7644 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7645 {
7646 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7647 *pGCPhysMem = NIL_RTGCPHYS;
7648 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7649 VERR_ACCESS_DENIED);
7650 }
7651 }
7652
7653 /*
7654 * Set the dirty / access flags.
7655 * ASSUMES this is set when the address is translated rather than on commit...
7656 */
7657 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7658 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7659 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7660 {
7661 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7662 AssertRC(rc2);
7663 }
7664
7665 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7666 *pGCPhysMem = GCPhys;
7667 return VINF_SUCCESS;
7668}
7669
7670
7671
7672/**
7673 * Maps a physical page.
7674 *
7675 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7677 * @param GCPhysMem The physical address.
7678 * @param fAccess The intended access.
7679 * @param ppvMem Where to return the mapping address.
7680 * @param pLock The PGM lock.
7681 */
7682IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7683{
7684#ifdef IEM_VERIFICATION_MODE_FULL
7685 /* Force the alternative path so we can ignore writes. */
7686 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7687 {
7688 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7689 {
7690 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7691 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7692 if (RT_FAILURE(rc2))
7693 pVCpu->iem.s.fProblematicMemory = true;
7694 }
7695 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7696 }
7697#endif
7698#ifdef IEM_LOG_MEMORY_WRITES
7699 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7700 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7701#endif
7702#ifdef IEM_VERIFICATION_MODE_MINIMAL
7703 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7704#endif
7705
7706 /** @todo This API may require some improvement later. A private deal with PGM
7707 * regarding locking and unlocking needs to be struck. A couple of TLBs
7708 * living in PGM, but with publicly accessible inlined access methods
7709 * could perhaps be an even better solution. */
7710 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7711 GCPhysMem,
7712 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7713 pVCpu->iem.s.fBypassHandlers,
7714 ppvMem,
7715 pLock);
7716 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7717 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7718
7719#ifdef IEM_VERIFICATION_MODE_FULL
7720 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7721 pVCpu->iem.s.fProblematicMemory = true;
7722#endif
7723 return rc;
7724}
7725
7726
7727/**
7728 * Unmap a page previously mapped by iemMemPageMap.
7729 *
7730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7731 * @param GCPhysMem The physical address.
7732 * @param fAccess The intended access.
7733 * @param pvMem What iemMemPageMap returned.
7734 * @param pLock The PGM lock.
7735 */
7736DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7737{
7738 NOREF(pVCpu);
7739 NOREF(GCPhysMem);
7740 NOREF(fAccess);
7741 NOREF(pvMem);
7742 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7743}
7744
7745
7746/**
7747 * Looks up a memory mapping entry.
7748 *
7749 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7751 * @param pvMem The memory address.
7752 * @param fAccess The access flags to match.
7753 */
7754DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7755{
7756 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7757 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7758 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7759 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7760 return 0;
7761 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7762 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7763 return 1;
7764 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7765 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7766 return 2;
7767 return VERR_NOT_FOUND;
7768}
7769
7770
7771/**
7772 * Finds a free memmap entry when using iNextMapping doesn't work.
7773 *
7774 * @returns Memory mapping index, 1024 on failure.
7775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7776 */
7777IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7778{
7779 /*
7780 * The easy case.
7781 */
7782 if (pVCpu->iem.s.cActiveMappings == 0)
7783 {
7784 pVCpu->iem.s.iNextMapping = 1;
7785 return 0;
7786 }
7787
7788 /* There should be enough mappings for all instructions. */
7789 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7790
7791 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7792 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7793 return i;
7794
7795 AssertFailedReturn(1024);
7796}
7797
7798
7799/**
7800 * Commits a bounce buffer that needs writing back and unmaps it.
7801 *
7802 * @returns Strict VBox status code.
7803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7804 * @param iMemMap The index of the buffer to commit.
7805 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7806 * Always false in ring-3, obviously.
7807 */
7808IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7809{
7810 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7811 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7812#ifdef IN_RING3
7813 Assert(!fPostponeFail);
7814 RT_NOREF_PV(fPostponeFail);
7815#endif
7816
7817 /*
7818 * Do the writing.
7819 */
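    /* Note: a bounce buffer may straddle a page boundary, hence the first and
       second physical ranges below (cbSecond is zero when it does not). */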
7820#ifndef IEM_VERIFICATION_MODE_MINIMAL
7821 PVM pVM = pVCpu->CTX_SUFF(pVM);
7822 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7823 && !IEM_VERIFICATION_ENABLED(pVCpu))
7824 {
7825 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7826 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7827 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7828 if (!pVCpu->iem.s.fBypassHandlers)
7829 {
7830 /*
7831 * Carefully and efficiently dealing with access handler return
7832 * codes makes this a little bloated.
7833 */
7834 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7835 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7836 pbBuf,
7837 cbFirst,
7838 PGMACCESSORIGIN_IEM);
7839 if (rcStrict == VINF_SUCCESS)
7840 {
7841 if (cbSecond)
7842 {
7843 rcStrict = PGMPhysWrite(pVM,
7844 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7845 pbBuf + cbFirst,
7846 cbSecond,
7847 PGMACCESSORIGIN_IEM);
7848 if (rcStrict == VINF_SUCCESS)
7849 { /* nothing */ }
7850 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7851 {
7852 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7853 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7855 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7856 }
7857# ifndef IN_RING3
7858 else if (fPostponeFail)
7859 {
7860 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7861 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7863 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7864 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7865 return iemSetPassUpStatus(pVCpu, rcStrict);
7866 }
7867# endif
7868 else
7869 {
7870 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7873 return rcStrict;
7874 }
7875 }
7876 }
7877 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7878 {
7879 if (!cbSecond)
7880 {
7881 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7882 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7883 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7884 }
7885 else
7886 {
7887 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7889 pbBuf + cbFirst,
7890 cbSecond,
7891 PGMACCESSORIGIN_IEM);
7892 if (rcStrict2 == VINF_SUCCESS)
7893 {
7894 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7896 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7897 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7898 }
7899 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7900 {
7901 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7902 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7903 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7904 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7905 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7906 }
7907# ifndef IN_RING3
7908 else if (fPostponeFail)
7909 {
7910 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7912 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7913 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7914 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7915 return iemSetPassUpStatus(pVCpu, rcStrict);
7916 }
7917# endif
7918 else
7919 {
7920 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7922 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7923 return rcStrict2;
7924 }
7925 }
7926 }
7927# ifndef IN_RING3
7928 else if (fPostponeFail)
7929 {
7930 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7933 if (!cbSecond)
7934 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7935 else
7936 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7937 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7938 return iemSetPassUpStatus(pVCpu, rcStrict);
7939 }
7940# endif
7941 else
7942 {
7943 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7944 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7945 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7946 return rcStrict;
7947 }
7948 }
7949 else
7950 {
7951 /*
7952 * No access handlers, much simpler.
7953 */
7954 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7955 if (RT_SUCCESS(rc))
7956 {
7957 if (cbSecond)
7958 {
7959 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7960 if (RT_SUCCESS(rc))
7961 { /* likely */ }
7962 else
7963 {
7964 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7965 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7966 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7967 return rc;
7968 }
7969 }
7970 }
7971 else
7972 {
7973 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7974 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7975 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7976 return rc;
7977 }
7978 }
7979 }
7980#endif
7981
7982#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7983 /*
7984 * Record the write(s).
7985 */
7986 if (!pVCpu->iem.s.fNoRem)
7987 {
7988 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7989 if (pEvtRec)
7990 {
7991 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7992 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7993 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7994 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7995 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7996 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7997 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7998 }
7999 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8000 {
8001 pEvtRec = iemVerifyAllocRecord(pVCpu);
8002 if (pEvtRec)
8003 {
8004 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8005 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8006 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8007 memcpy(pEvtRec->u.RamWrite.ab,
8008 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8009 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8010 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8011 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8012 }
8013 }
8014 }
8015#endif
8016#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8017 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8018 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8019 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8020 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8021 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8022 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8023
8024 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8025 g_cbIemWrote = cbWrote;
8026 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8027#endif
8028
8029 /*
8030 * Free the mapping entry.
8031 */
8032 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8033 Assert(pVCpu->iem.s.cActiveMappings != 0);
8034 pVCpu->iem.s.cActiveMappings--;
8035 return VINF_SUCCESS;
8036}
8037
8038
8039/**
8040 * iemMemMap worker that deals with a request crossing pages.
8041 */
8042IEM_STATIC VBOXSTRICTRC
8043iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8044{
8045 /*
8046 * Do the address translations.
8047 */
8048 RTGCPHYS GCPhysFirst;
8049 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8050 if (rcStrict != VINF_SUCCESS)
8051 return rcStrict;
8052
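    /* Descriptive note: the second translation is done on the start of the second
       page (derived from the last byte of the access), and the physical result is
       masked down to the page boundary since the access is known to cross pages. */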
8053 RTGCPHYS GCPhysSecond;
8054 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8055 fAccess, &GCPhysSecond);
8056 if (rcStrict != VINF_SUCCESS)
8057 return rcStrict;
8058 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8059
8060 PVM pVM = pVCpu->CTX_SUFF(pVM);
8061#ifdef IEM_VERIFICATION_MODE_FULL
8062 /*
8063 * Detect problematic memory when verifying so we can select
8064 * the right execution engine. (TLB: Redo this.)
8065 */
8066 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8067 {
8068 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8069 if (RT_SUCCESS(rc2))
8070 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8071 if (RT_FAILURE(rc2))
8072 pVCpu->iem.s.fProblematicMemory = true;
8073 }
8074#endif
8075
8076
8077 /*
8078 * Read in the current memory content if it's a read, execute or partial
8079 * write access.
8080 */
8081 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8082 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8083 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8084
8085 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8086 {
8087 if (!pVCpu->iem.s.fBypassHandlers)
8088 {
8089 /*
 8090             * Must carefully deal with access handler status codes here, which
 8091             * makes the code a bit bloated.
8092 */
8093 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8094 if (rcStrict == VINF_SUCCESS)
8095 {
8096 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8097 if (rcStrict == VINF_SUCCESS)
8098 { /*likely */ }
8099 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8100 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8101 else
8102 {
 8103                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8104 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8105 return rcStrict;
8106 }
8107 }
8108 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8109 {
8110 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8111 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8112 {
8113 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8114 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8115 }
8116 else
8117 {
 8118                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
 8119                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8120 return rcStrict2;
8121 }
8122 }
8123 else
8124 {
 8125                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8126 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8127 return rcStrict;
8128 }
8129 }
8130 else
8131 {
8132 /*
 8133             * No informational status codes here, much more straightforward.
8134 */
8135 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8136 if (RT_SUCCESS(rc))
8137 {
8138 Assert(rc == VINF_SUCCESS);
8139 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8140 if (RT_SUCCESS(rc))
8141 Assert(rc == VINF_SUCCESS);
8142 else
8143 {
 8144                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8145 return rc;
8146 }
8147 }
8148 else
8149 {
 8150                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8151 return rc;
8152 }
8153 }
8154
8155#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8156 if ( !pVCpu->iem.s.fNoRem
8157 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8158 {
8159 /*
8160 * Record the reads.
8161 */
8162 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8163 if (pEvtRec)
8164 {
8165 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8166 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8167 pEvtRec->u.RamRead.cb = cbFirstPage;
8168 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8169 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8170 }
8171 pEvtRec = iemVerifyAllocRecord(pVCpu);
8172 if (pEvtRec)
8173 {
8174 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8175 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8176 pEvtRec->u.RamRead.cb = cbSecondPage;
8177 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8178 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8179 }
8180 }
8181#endif
8182 }
8183#ifdef VBOX_STRICT
8184 else
8185 memset(pbBuf, 0xcc, cbMem);
8186 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8187 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8188#endif
8189
8190 /*
8191 * Commit the bounce buffer entry.
8192 */
8193 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8194 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8195 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8196 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8197 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8198 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8199 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8200 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8201 pVCpu->iem.s.cActiveMappings++;
8202
8203 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8204 *ppvMem = pbBuf;
8205 return VINF_SUCCESS;
8206}
8207
8208
8209/**
 8210 * iemMemMap worker that deals with iemMemPageMap failures.
8211 */
8212IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8213 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8214{
8215 /*
8216 * Filter out conditions we can handle and the ones which shouldn't happen.
8217 */
8218 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8219 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8220 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8221 {
8222 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8223 return rcMap;
8224 }
8225 pVCpu->iem.s.cPotentialExits++;
8226
8227 /*
8228 * Read in the current memory content if it's a read, execute or partial
8229 * write access.
8230 */
8231 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8232 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8233 {
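        /* Unassigned memory reads back as all 0xff bytes; otherwise fetch the
           current content so untouched bytes survive a partial write. */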
8234 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8235 memset(pbBuf, 0xff, cbMem);
8236 else
8237 {
8238 int rc;
8239 if (!pVCpu->iem.s.fBypassHandlers)
8240 {
8241 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8242 if (rcStrict == VINF_SUCCESS)
8243 { /* nothing */ }
8244 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8245 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8246 else
8247 {
8248 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8249 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8250 return rcStrict;
8251 }
8252 }
8253 else
8254 {
8255 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8256 if (RT_SUCCESS(rc))
8257 { /* likely */ }
8258 else
8259 {
8260 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8261 GCPhysFirst, rc));
8262 return rc;
8263 }
8264 }
8265 }
8266
8267#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8268 if ( !pVCpu->iem.s.fNoRem
8269 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8270 {
8271 /*
8272 * Record the read.
8273 */
8274 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8275 if (pEvtRec)
8276 {
8277 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8278 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8279 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8280 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8281 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8282 }
8283 }
8284#endif
8285 }
8286#ifdef VBOX_STRICT
8287 else
8288 memset(pbBuf, 0xcc, cbMem);
8289#endif
8290#ifdef VBOX_STRICT
8291 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8292 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8293#endif
8294
8295 /*
8296 * Commit the bounce buffer entry.
8297 */
8298 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8299 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8300 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8301 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8302 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8303 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8304 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8305 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8306 pVCpu->iem.s.cActiveMappings++;
8307
8308 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8309 *ppvMem = pbBuf;
8310 return VINF_SUCCESS;
8311}
8312
8313
8314
8315/**
8316 * Maps the specified guest memory for the given kind of access.
8317 *
8318 * This may be using bounce buffering of the memory if it's crossing a page
8319 * boundary or if there is an access handler installed for any of it. Because
8320 * of lock prefix guarantees, we're in for some extra clutter when this
8321 * happens.
8322 *
8323 * This may raise a \#GP, \#SS, \#PF or \#AC.
8324 *
8325 * @returns VBox strict status code.
8326 *
8327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8328 * @param ppvMem Where to return the pointer to the mapped
8329 * memory.
8330 * @param cbMem The number of bytes to map. This is usually 1,
8331 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8332 * string operations it can be up to a page.
8333 * @param iSegReg The index of the segment register to use for
8334 * this access. The base and limits are checked.
8335 * Use UINT8_MAX to indicate that no segmentation
8336 * is required (for IDT, GDT and LDT accesses).
8337 * @param GCPtrMem The address of the guest memory.
8338 * @param fAccess How the memory is being accessed. The
8339 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8340 * how to map the memory, while the
8341 * IEM_ACCESS_WHAT_XXX bit is used when raising
8342 * exceptions.
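 *
 * @remarks  Illustrative usage sketch (mirrors iemMemFetchDataU32 below); a
 *           successful mapping must always be paired with a commit/unmap call:
 * @code
 *      uint32_t const *pu32Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Value = *pu32Src;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *      }
 * @endcode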
8343 */
8344IEM_STATIC VBOXSTRICTRC
8345iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8346{
8347 /*
8348 * Check the input and figure out which mapping entry to use.
8349 */
8350 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8351 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8352 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8353
8354 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8355 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8356 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8357 {
8358 iMemMap = iemMemMapFindFree(pVCpu);
8359 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8360 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8361 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8362 pVCpu->iem.s.aMemMappings[2].fAccess),
8363 VERR_IEM_IPE_9);
8364 }
8365
8366 /*
8367 * Map the memory, checking that we can actually access it. If something
8368 * slightly complicated happens, fall back on bounce buffering.
8369 */
8370 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8371 if (rcStrict != VINF_SUCCESS)
8372 return rcStrict;
8373
8374 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8375 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8376
8377 RTGCPHYS GCPhysFirst;
8378 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8379 if (rcStrict != VINF_SUCCESS)
8380 return rcStrict;
8381
8382 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8383 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8384 if (fAccess & IEM_ACCESS_TYPE_READ)
8385 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8386
8387 void *pvMem;
8388 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8389 if (rcStrict != VINF_SUCCESS)
8390 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8391
8392 /*
8393 * Fill in the mapping table entry.
8394 */
8395 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8396 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8397 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8398 pVCpu->iem.s.cActiveMappings++;
8399
8400 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8401 *ppvMem = pvMem;
8402 return VINF_SUCCESS;
8403}
8404
8405
8406/**
8407 * Commits the guest memory if bounce buffered and unmaps it.
8408 *
8409 * @returns Strict VBox status code.
8410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8411 * @param pvMem The mapping.
8412 * @param fAccess The kind of access.
8413 */
8414IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8415{
8416 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8417 AssertReturn(iMemMap >= 0, iMemMap);
8418
8419 /* If it's bounce buffered, we may need to write back the buffer. */
8420 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8421 {
8422 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8423 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8424 }
8425 /* Otherwise unlock it. */
8426 else
8427 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8428
8429 /* Free the entry. */
8430 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8431 Assert(pVCpu->iem.s.cActiveMappings != 0);
8432 pVCpu->iem.s.cActiveMappings--;
8433 return VINF_SUCCESS;
8434}
8435
8436#ifdef IEM_WITH_SETJMP
8437
8438/**
8439 * Maps the specified guest memory for the given kind of access, longjmp on
8440 * error.
8441 *
8442 * This may be using bounce buffering of the memory if it's crossing a page
8443 * boundary or if there is an access handler installed for any of it. Because
8444 * of lock prefix guarantees, we're in for some extra clutter when this
8445 * happens.
8446 *
8447 * This may raise a \#GP, \#SS, \#PF or \#AC.
8448 *
8449 * @returns Pointer to the mapped memory.
8450 *
8451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8452 * @param cbMem The number of bytes to map. This is usually 1,
8453 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8454 * string operations it can be up to a page.
8455 * @param iSegReg The index of the segment register to use for
8456 * this access. The base and limits are checked.
8457 * Use UINT8_MAX to indicate that no segmentation
8458 * is required (for IDT, GDT and LDT accesses).
8459 * @param GCPtrMem The address of the guest memory.
8460 * @param fAccess How the memory is being accessed. The
8461 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8462 * how to map the memory, while the
8463 * IEM_ACCESS_WHAT_XXX bit is used when raising
8464 * exceptions.
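 *
 * @remarks  Illustrative usage sketch (mirrors iemMemFetchDataU8Jmp below); no
 *           status checking is needed since failures longjmp out of here:
 * @code
 *      uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src),
 *                                                            iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
 *      uint8_t const bValue = *pu8Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
 * @endcode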
8465 */
8466IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8467{
8468 /*
8469 * Check the input and figure out which mapping entry to use.
8470 */
8471 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8472 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8473 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8474
8475 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8476 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8477 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8478 {
8479 iMemMap = iemMemMapFindFree(pVCpu);
8480 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8481 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8482 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8483 pVCpu->iem.s.aMemMappings[2].fAccess),
8484 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8485 }
8486
8487 /*
8488 * Map the memory, checking that we can actually access it. If something
8489 * slightly complicated happens, fall back on bounce buffering.
8490 */
8491 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8492 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8493 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8494
8495 /* Crossing a page boundary? */
8496 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8497 { /* No (likely). */ }
8498 else
8499 {
8500 void *pvMem;
8501 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8502 if (rcStrict == VINF_SUCCESS)
8503 return pvMem;
8504 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8505 }
8506
8507 RTGCPHYS GCPhysFirst;
8508 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8509 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8510 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8511
8512 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8513 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8514 if (fAccess & IEM_ACCESS_TYPE_READ)
8515 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8516
8517 void *pvMem;
8518 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8519 if (rcStrict == VINF_SUCCESS)
8520 { /* likely */ }
8521 else
8522 {
8523 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8524 if (rcStrict == VINF_SUCCESS)
8525 return pvMem;
8526 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8527 }
8528
8529 /*
8530 * Fill in the mapping table entry.
8531 */
8532 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8533 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8534 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8535 pVCpu->iem.s.cActiveMappings++;
8536
8537 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8538 return pvMem;
8539}
8540
8541
8542/**
8543 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8544 *
8545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8546 * @param pvMem The mapping.
8547 * @param fAccess The kind of access.
8548 */
8549IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8550{
8551 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8552 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8553
8554 /* If it's bounce buffered, we may need to write back the buffer. */
8555 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8556 {
8557 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8558 {
8559 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8560 if (rcStrict == VINF_SUCCESS)
8561 return;
8562 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8563 }
8564 }
8565 /* Otherwise unlock it. */
8566 else
8567 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8568
8569 /* Free the entry. */
8570 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8571 Assert(pVCpu->iem.s.cActiveMappings != 0);
8572 pVCpu->iem.s.cActiveMappings--;
8573}
8574
8575#endif
8576
8577#ifndef IN_RING3
8578/**
8579 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
 8580 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 8581 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
8582 * Allows the instruction to be completed and retired, while the IEM user will
8583 * return to ring-3 immediately afterwards and do the postponed writes there.
8584 *
8585 * @returns VBox status code (no strict statuses). Caller must check
8586 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8588 * @param pvMem The mapping.
8589 * @param fAccess The kind of access.
8590 */
8591IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8592{
8593 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8594 AssertReturn(iMemMap >= 0, iMemMap);
8595
8596 /* If it's bounce buffered, we may need to write back the buffer. */
8597 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8598 {
8599 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8600 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8601 }
8602 /* Otherwise unlock it. */
8603 else
8604 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8605
8606 /* Free the entry. */
8607 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8608 Assert(pVCpu->iem.s.cActiveMappings != 0);
8609 pVCpu->iem.s.cActiveMappings--;
8610 return VINF_SUCCESS;
8611}
8612#endif
8613
8614
8615/**
 8616 * Rolls back mappings, releasing page locks and such.
 8617 *
 8618 * The caller shall only call this after checking cActiveMappings.
 8619 *
8621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8622 */
8623IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8624{
8625 Assert(pVCpu->iem.s.cActiveMappings > 0);
8626
8627 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8628 while (iMemMap-- > 0)
8629 {
8630 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8631 if (fAccess != IEM_ACCESS_INVALID)
8632 {
8633 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8634 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8635 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8636 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8637 Assert(pVCpu->iem.s.cActiveMappings > 0);
8638 pVCpu->iem.s.cActiveMappings--;
8639 }
8640 }
8641}
8642
8643
8644/**
8645 * Fetches a data byte.
8646 *
8647 * @returns Strict VBox status code.
8648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8649 * @param pu8Dst Where to return the byte.
8650 * @param iSegReg The index of the segment register to use for
8651 * this access. The base and limits are checked.
8652 * @param GCPtrMem The address of the guest memory.
8653 */
8654IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8655{
8656 /* The lazy approach for now... */
8657 uint8_t const *pu8Src;
8658 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8659 if (rc == VINF_SUCCESS)
8660 {
8661 *pu8Dst = *pu8Src;
8662 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8663 }
8664 return rc;
8665}
8666
8667
8668#ifdef IEM_WITH_SETJMP
8669/**
8670 * Fetches a data byte, longjmp on error.
8671 *
8672 * @returns The byte.
8673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8674 * @param iSegReg The index of the segment register to use for
8675 * this access. The base and limits are checked.
8676 * @param GCPtrMem The address of the guest memory.
8677 */
8678DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8679{
8680 /* The lazy approach for now... */
8681 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8682 uint8_t const bRet = *pu8Src;
8683 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8684 return bRet;
8685}
8686#endif /* IEM_WITH_SETJMP */
8687
8688
8689/**
8690 * Fetches a data word.
8691 *
8692 * @returns Strict VBox status code.
8693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8694 * @param pu16Dst Where to return the word.
8695 * @param iSegReg The index of the segment register to use for
8696 * this access. The base and limits are checked.
8697 * @param GCPtrMem The address of the guest memory.
8698 */
8699IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8700{
8701 /* The lazy approach for now... */
8702 uint16_t const *pu16Src;
8703 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8704 if (rc == VINF_SUCCESS)
8705 {
8706 *pu16Dst = *pu16Src;
8707 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8708 }
8709 return rc;
8710}
8711
8712
8713#ifdef IEM_WITH_SETJMP
8714/**
8715 * Fetches a data word, longjmp on error.
8716 *
8717 * @returns The word
8718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8719 * @param iSegReg The index of the segment register to use for
8720 * this access. The base and limits are checked.
8721 * @param GCPtrMem The address of the guest memory.
8722 */
8723DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8724{
8725 /* The lazy approach for now... */
8726 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8727 uint16_t const u16Ret = *pu16Src;
8728 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8729 return u16Ret;
8730}
8731#endif
8732
8733
8734/**
8735 * Fetches a data dword.
8736 *
8737 * @returns Strict VBox status code.
8738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8739 * @param pu32Dst Where to return the dword.
8740 * @param iSegReg The index of the segment register to use for
8741 * this access. The base and limits are checked.
8742 * @param GCPtrMem The address of the guest memory.
8743 */
8744IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8745{
8746 /* The lazy approach for now... */
8747 uint32_t const *pu32Src;
8748 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8749 if (rc == VINF_SUCCESS)
8750 {
8751 *pu32Dst = *pu32Src;
8752 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8753 }
8754 return rc;
8755}
8756
8757
8758#ifdef IEM_WITH_SETJMP
8759
8760IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8761{
8762 Assert(cbMem >= 1);
8763 Assert(iSegReg < X86_SREG_COUNT);
8764
8765 /*
8766 * 64-bit mode is simpler.
8767 */
8768 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8769 {
8770 if (iSegReg >= X86_SREG_FS)
8771 {
8772 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8773 GCPtrMem += pSel->u64Base;
8774 }
8775
8776 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8777 return GCPtrMem;
8778 }
8779 /*
8780 * 16-bit and 32-bit segmentation.
8781 */
8782 else
8783 {
8784 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
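        /* Expand-up data segments and readable code segments (present and usable)
           are checked the same way against the segment limit below. */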
8785 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8786 == X86DESCATTR_P /* data, expand up */
8787 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8788 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8789 {
8790 /* expand up */
8791 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8792 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8793 && GCPtrLast32 > (uint32_t)GCPtrMem))
8794 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8795 }
8796 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8797 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8798 {
8799 /* expand down */
8800 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8801 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8802 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8803 && GCPtrLast32 > (uint32_t)GCPtrMem))
8804 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8805 }
8806 else
8807 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8808 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8809 }
8810 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8811}
8812
8813
8814IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8815{
8816 Assert(cbMem >= 1);
8817 Assert(iSegReg < X86_SREG_COUNT);
8818
8819 /*
8820 * 64-bit mode is simpler.
8821 */
8822 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8823 {
8824 if (iSegReg >= X86_SREG_FS)
8825 {
8826 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8827 GCPtrMem += pSel->u64Base;
8828 }
8829
8830 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8831 return GCPtrMem;
8832 }
8833 /*
8834 * 16-bit and 32-bit segmentation.
8835 */
8836 else
8837 {
8838 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8839 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8840 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8841 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8842 {
8843 /* expand up */
8844 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8845 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8846 && GCPtrLast32 > (uint32_t)GCPtrMem))
8847 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8848 }
 8849        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8850 {
8851 /* expand down */
8852 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8853 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8854 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8855 && GCPtrLast32 > (uint32_t)GCPtrMem))
8856 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8857 }
8858 else
8859 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8860 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8861 }
8862 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8863}
8864
8865
8866/**
8867 * Fetches a data dword, longjmp on error, fallback/safe version.
8868 *
8869 * @returns The dword
8870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8871 * @param iSegReg The index of the segment register to use for
8872 * this access. The base and limits are checked.
8873 * @param GCPtrMem The address of the guest memory.
8874 */
8875IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8876{
8877 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8878 uint32_t const u32Ret = *pu32Src;
8879 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8880 return u32Ret;
8881}
8882
8883
8884/**
8885 * Fetches a data dword, longjmp on error.
8886 *
8887 * @returns The dword
8888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8889 * @param iSegReg The index of the segment register to use for
8890 * this access. The base and limits are checked.
8891 * @param GCPtrMem The address of the guest memory.
8892 */
8893DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8894{
8895# ifdef IEM_WITH_DATA_TLB
8896 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8897 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8898 {
8899 /// @todo more later.
8900 }
8901
8902 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8903# else
8904 /* The lazy approach. */
8905 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8906 uint32_t const u32Ret = *pu32Src;
8907 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8908 return u32Ret;
8909# endif
8910}
8911#endif
8912
8913
8914#ifdef SOME_UNUSED_FUNCTION
8915/**
8916 * Fetches a data dword and sign extends it to a qword.
8917 *
8918 * @returns Strict VBox status code.
8919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8920 * @param pu64Dst Where to return the sign extended value.
8921 * @param iSegReg The index of the segment register to use for
8922 * this access. The base and limits are checked.
8923 * @param GCPtrMem The address of the guest memory.
8924 */
8925IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8926{
8927 /* The lazy approach for now... */
8928 int32_t const *pi32Src;
8929 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8930 if (rc == VINF_SUCCESS)
8931 {
8932 *pu64Dst = *pi32Src;
8933 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8934 }
8935#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8936 else
8937 *pu64Dst = 0;
8938#endif
8939 return rc;
8940}
8941#endif
8942
8943
8944/**
8945 * Fetches a data qword.
8946 *
8947 * @returns Strict VBox status code.
8948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8949 * @param pu64Dst Where to return the qword.
8950 * @param iSegReg The index of the segment register to use for
8951 * this access. The base and limits are checked.
8952 * @param GCPtrMem The address of the guest memory.
8953 */
8954IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8955{
8956 /* The lazy approach for now... */
8957 uint64_t const *pu64Src;
8958 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8959 if (rc == VINF_SUCCESS)
8960 {
8961 *pu64Dst = *pu64Src;
8962 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8963 }
8964 return rc;
8965}
8966
8967
8968#ifdef IEM_WITH_SETJMP
8969/**
8970 * Fetches a data qword, longjmp on error.
8971 *
8972 * @returns The qword.
8973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8974 * @param iSegReg The index of the segment register to use for
8975 * this access. The base and limits are checked.
8976 * @param GCPtrMem The address of the guest memory.
8977 */
8978DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8979{
8980 /* The lazy approach for now... */
8981 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8982 uint64_t const u64Ret = *pu64Src;
8983 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8984 return u64Ret;
8985}
8986#endif
8987
8988
8989/**
 8990 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8991 *
8992 * @returns Strict VBox status code.
8993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8994 * @param pu64Dst Where to return the qword.
8995 * @param iSegReg The index of the segment register to use for
8996 * this access. The base and limits are checked.
8997 * @param GCPtrMem The address of the guest memory.
8998 */
8999IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9000{
9001 /* The lazy approach for now... */
9002 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9003 if (RT_UNLIKELY(GCPtrMem & 15))
9004 return iemRaiseGeneralProtectionFault0(pVCpu);
9005
9006 uint64_t const *pu64Src;
9007 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9008 if (rc == VINF_SUCCESS)
9009 {
9010 *pu64Dst = *pu64Src;
9011 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9012 }
9013 return rc;
9014}
9015
9016
9017#ifdef IEM_WITH_SETJMP
9018/**
 9019 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9020 *
9021 * @returns The qword.
9022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9023 * @param iSegReg The index of the segment register to use for
9024 * this access. The base and limits are checked.
9025 * @param GCPtrMem The address of the guest memory.
9026 */
9027DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9028{
9029 /* The lazy approach for now... */
9030 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9031 if (RT_LIKELY(!(GCPtrMem & 15)))
9032 {
9033 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9034 uint64_t const u64Ret = *pu64Src;
9035 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9036 return u64Ret;
9037 }
9038
9039 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9040 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9041}
9042#endif
9043
9044
9045/**
9046 * Fetches a data tword.
9047 *
9048 * @returns Strict VBox status code.
9049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9050 * @param pr80Dst Where to return the tword.
9051 * @param iSegReg The index of the segment register to use for
9052 * this access. The base and limits are checked.
9053 * @param GCPtrMem The address of the guest memory.
9054 */
9055IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9056{
9057 /* The lazy approach for now... */
9058 PCRTFLOAT80U pr80Src;
9059 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9060 if (rc == VINF_SUCCESS)
9061 {
9062 *pr80Dst = *pr80Src;
9063 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9064 }
9065 return rc;
9066}
9067
9068
9069#ifdef IEM_WITH_SETJMP
9070/**
9071 * Fetches a data tword, longjmp on error.
9072 *
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param pr80Dst Where to return the tword.
9075 * @param iSegReg The index of the segment register to use for
9076 * this access. The base and limits are checked.
9077 * @param GCPtrMem The address of the guest memory.
9078 */
9079DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9080{
9081 /* The lazy approach for now... */
9082 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9083 *pr80Dst = *pr80Src;
9084 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9085}
9086#endif
9087
9088
9089/**
9090 * Fetches a data dqword (double qword), generally SSE related.
9091 *
9092 * @returns Strict VBox status code.
9093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9094 * @param pu128Dst Where to return the dqword.
9095 * @param iSegReg The index of the segment register to use for
9096 * this access. The base and limits are checked.
9097 * @param GCPtrMem The address of the guest memory.
9098 */
9099IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9100{
9101 /* The lazy approach for now... */
9102 uint128_t const *pu128Src;
9103 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9104 if (rc == VINF_SUCCESS)
9105 {
9106 *pu128Dst = *pu128Src;
9107 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9108 }
9109 return rc;
9110}
9111
9112
9113#ifdef IEM_WITH_SETJMP
9114/**
 9115 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9116 *
9117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9118 * @param pu128Dst Where to return the dqword.
9119 * @param iSegReg The index of the segment register to use for
9120 * this access. The base and limits are checked.
9121 * @param GCPtrMem The address of the guest memory.
9122 */
9123IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9124{
9125 /* The lazy approach for now... */
9126 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9127 *pu128Dst = *pu128Src;
9128 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9129}
9130#endif
9131
9132
9133/**
9134 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9135 * related.
9136 *
9137 * Raises \#GP(0) if not aligned.
9138 *
9139 * @returns Strict VBox status code.
9140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9141 * @param pu128Dst Where to return the dqword.
9142 * @param iSegReg The index of the segment register to use for
9143 * this access. The base and limits are checked.
9144 * @param GCPtrMem The address of the guest memory.
9145 */
9146IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9147{
9148 /* The lazy approach for now... */
9149 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9150 if ( (GCPtrMem & 15)
9151 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9152 return iemRaiseGeneralProtectionFault0(pVCpu);
9153
9154 uint128_t const *pu128Src;
9155 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9156 if (rc == VINF_SUCCESS)
9157 {
9158 *pu128Dst = *pu128Src;
9159 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9160 }
9161 return rc;
9162}
9163
9164
9165#ifdef IEM_WITH_SETJMP
9166/**
9167 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9168 * related, longjmp on error.
9169 *
9170 * Raises \#GP(0) if not aligned.
9171 *
9172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9173 * @param pu128Dst Where to return the dqword.
9174 * @param iSegReg The index of the segment register to use for
9175 * this access. The base and limits are checked.
9176 * @param GCPtrMem The address of the guest memory.
9177 */
9178DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9179{
9180 /* The lazy approach for now... */
9181 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9182 if ( (GCPtrMem & 15) == 0
9183 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9184 {
9185 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9186 IEM_ACCESS_DATA_R);
9187 *pu128Dst = *pu128Src;
9188 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9189 return;
9190 }
9191
9192 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9193 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9194}
9195#endif
9196
9197
9198
9199/**
9200 * Fetches a descriptor register (lgdt, lidt).
9201 *
9202 * @returns Strict VBox status code.
9203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9204 * @param pcbLimit Where to return the limit.
9205 * @param pGCPtrBase Where to return the base.
9206 * @param iSegReg The index of the segment register to use for
9207 * this access. The base and limits are checked.
9208 * @param GCPtrMem The address of the guest memory.
9209 * @param enmOpSize The effective operand size.
9210 */
9211IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9212 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9213{
9214 /*
9215 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9216 * little special:
9217 * - The two reads are done separately.
 9218 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9219 * - We suspect the 386 to actually commit the limit before the base in
9220 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
 9221 * don't try to emulate this eccentric behavior, because it's not well
9222 * enough understood and rather hard to trigger.
9223 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9224 */
9225 VBOXSTRICTRC rcStrict;
9226 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9227 {
9228 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9229 if (rcStrict == VINF_SUCCESS)
9230 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9231 }
9232 else
9233 {
 9234        uint32_t uTmp = 0; /* (Otherwise Visual C++ may think it is used uninitialized.) */
9235 if (enmOpSize == IEMMODE_32BIT)
9236 {
9237 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9238 {
9239 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9240 if (rcStrict == VINF_SUCCESS)
9241 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9242 }
9243 else
9244 {
9245 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9246 if (rcStrict == VINF_SUCCESS)
9247 {
9248 *pcbLimit = (uint16_t)uTmp;
9249 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9250 }
9251 }
9252 if (rcStrict == VINF_SUCCESS)
9253 *pGCPtrBase = uTmp;
9254 }
9255 else
9256 {
9257 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9258 if (rcStrict == VINF_SUCCESS)
9259 {
9260 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9261 if (rcStrict == VINF_SUCCESS)
9262 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9263 }
9264 }
9265 }
9266 return rcStrict;
9267}
9268
9269
9270
9271/**
9272 * Stores a data byte.
9273 *
9274 * @returns Strict VBox status code.
9275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9276 * @param iSegReg The index of the segment register to use for
9277 * this access. The base and limits are checked.
9278 * @param GCPtrMem The address of the guest memory.
9279 * @param u8Value The value to store.
9280 */
9281IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9282{
9283 /* The lazy approach for now... */
9284 uint8_t *pu8Dst;
9285 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9286 if (rc == VINF_SUCCESS)
9287 {
9288 *pu8Dst = u8Value;
9289 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9290 }
9291 return rc;
9292}
9293
9294
9295#ifdef IEM_WITH_SETJMP
9296/**
9297 * Stores a data byte, longjmp on error.
9298 *
9299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9300 * @param iSegReg The index of the segment register to use for
9301 * this access. The base and limits are checked.
9302 * @param GCPtrMem The address of the guest memory.
9303 * @param u8Value The value to store.
9304 */
9305IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9306{
9307 /* The lazy approach for now... */
9308 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9309 *pu8Dst = u8Value;
9310 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9311}
9312#endif
9313
9314
9315/**
9316 * Stores a data word.
9317 *
9318 * @returns Strict VBox status code.
9319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9320 * @param iSegReg The index of the segment register to use for
9321 * this access. The base and limits are checked.
9322 * @param GCPtrMem The address of the guest memory.
9323 * @param u16Value The value to store.
9324 */
9325IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9326{
9327 /* The lazy approach for now... */
9328 uint16_t *pu16Dst;
9329 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9330 if (rc == VINF_SUCCESS)
9331 {
9332 *pu16Dst = u16Value;
9333 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9334 }
9335 return rc;
9336}
9337
9338
9339#ifdef IEM_WITH_SETJMP
9340/**
9341 * Stores a data word, longjmp on error.
9342 *
9343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9344 * @param iSegReg The index of the segment register to use for
9345 * this access. The base and limits are checked.
9346 * @param GCPtrMem The address of the guest memory.
9347 * @param u16Value The value to store.
9348 */
9349IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9350{
9351 /* The lazy approach for now... */
9352 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9353 *pu16Dst = u16Value;
9354 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9355}
9356#endif
9357
9358
9359/**
9360 * Stores a data dword.
9361 *
9362 * @returns Strict VBox status code.
9363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9364 * @param iSegReg The index of the segment register to use for
9365 * this access. The base and limits are checked.
9366 * @param GCPtrMem The address of the guest memory.
9367 * @param u32Value The value to store.
9368 */
9369IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9370{
9371 /* The lazy approach for now... */
9372 uint32_t *pu32Dst;
9373 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9374 if (rc == VINF_SUCCESS)
9375 {
9376 *pu32Dst = u32Value;
9377 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9378 }
9379 return rc;
9380}
9381
9382
9383#ifdef IEM_WITH_SETJMP
9384/**
 9385 * Stores a data dword, longjmp on error.
 9386 *
9388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9389 * @param iSegReg The index of the segment register to use for
9390 * this access. The base and limits are checked.
9391 * @param GCPtrMem The address of the guest memory.
9392 * @param u32Value The value to store.
9393 */
9394IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9395{
9396 /* The lazy approach for now... */
9397 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9398 *pu32Dst = u32Value;
9399 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9400}
9401#endif
9402
9403
9404/**
9405 * Stores a data qword.
9406 *
9407 * @returns Strict VBox status code.
9408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9409 * @param iSegReg The index of the segment register to use for
9410 * this access. The base and limits are checked.
9411 * @param GCPtrMem The address of the guest memory.
9412 * @param u64Value The value to store.
9413 */
9414IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9415{
9416 /* The lazy approach for now... */
9417 uint64_t *pu64Dst;
9418 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9419 if (rc == VINF_SUCCESS)
9420 {
9421 *pu64Dst = u64Value;
9422 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9423 }
9424 return rc;
9425}
9426
9427
9428#ifdef IEM_WITH_SETJMP
9429/**
9430 * Stores a data qword, longjmp on error.
9431 *
9432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9433 * @param iSegReg The index of the segment register to use for
9434 * this access. The base and limits are checked.
9435 * @param GCPtrMem The address of the guest memory.
9436 * @param u64Value The value to store.
9437 */
9438IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9439{
9440 /* The lazy approach for now... */
9441 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9442 *pu64Dst = u64Value;
9443 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9444}
9445#endif
9446
9447
9448/**
9449 * Stores a data dqword.
9450 *
9451 * @returns Strict VBox status code.
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param iSegReg The index of the segment register to use for
9454 * this access. The base and limits are checked.
9455 * @param GCPtrMem The address of the guest memory.
9456 * @param u128Value The value to store.
9457 */
9458IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9459{
9460 /* The lazy approach for now... */
9461 uint128_t *pu128Dst;
9462 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9463 if (rc == VINF_SUCCESS)
9464 {
9465 *pu128Dst = u128Value;
9466 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9467 }
9468 return rc;
9469}
9470
9471
9472#ifdef IEM_WITH_SETJMP
9473/**
9474 * Stores a data dqword, longjmp on error.
9475 *
9476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9477 * @param iSegReg The index of the segment register to use for
9478 * this access. The base and limits are checked.
9479 * @param GCPtrMem The address of the guest memory.
9480 * @param u128Value The value to store.
9481 */
9482IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9483{
9484 /* The lazy approach for now... */
9485 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9486 *pu128Dst = u128Value;
9487 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9488}
9489#endif
9490
9491
9492/**
9493 * Stores a data dqword, SSE aligned.
9494 *
9495 * @returns Strict VBox status code.
9496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9497 * @param iSegReg The index of the segment register to use for
9498 * this access. The base and limits are checked.
9499 * @param GCPtrMem The address of the guest memory.
9500 * @param u128Value The value to store.
9501 */
9502IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9503{
9504 /* The lazy approach for now... */
9505 if ( (GCPtrMem & 15)
9506 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9507 return iemRaiseGeneralProtectionFault0(pVCpu);
9508
9509 uint128_t *pu128Dst;
9510 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9511 if (rc == VINF_SUCCESS)
9512 {
9513 *pu128Dst = u128Value;
9514 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9515 }
9516 return rc;
9517}
9518
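/** @par Example (illustrative, not from the original source)
 *  This is the kind of helper an aligned 128-bit store (MOVAPS/MOVDQA style)
 *  would go through: a 16-byte aligned address is required unless the
 *  MXCSR.MM (misaligned exception mask) bit is set, otherwise \#GP(0) is raised:
 *  @code
 *      // GCPtrEff and u128Value are hypothetical; an address like 0x1008 fails
 *      // the (GCPtrEff & 15) check above and raises #GP(0) unless MXCSR.MM is set.
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataU128AlignedSse(pVCpu, X86_SREG_DS, GCPtrEff, u128Value);
 *  @endcode
 */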
9519
9520#ifdef IEM_WITH_SETJMP
9521/**
9522 * Stores a data dqword, SSE aligned, longjmp on error.
9523 *
9525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9526 * @param iSegReg The index of the segment register to use for
9527 * this access. The base and limits are checked.
9528 * @param GCPtrMem The address of the guest memory.
9529 * @param u128Value The value to store.
9530 */
9531DECL_NO_INLINE(IEM_STATIC, void)
9532iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9533{
9534 /* The lazy approach for now... */
9535 if ( (GCPtrMem & 15) == 0
9536 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9537 {
9538 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9539 *pu128Dst = u128Value;
9540 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9541 return;
9542 }
9543
9544 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9545 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9546}
9547#endif
9548
9549
9550/**
9551 * Stores a descriptor register (sgdt, sidt).
9552 *
9553 * @returns Strict VBox status code.
9554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9555 * @param cbLimit The limit.
9556 * @param GCPtrBase The base address.
9557 * @param iSegReg The index of the segment register to use for
9558 * this access. The base and limits are checked.
9559 * @param GCPtrMem The address of the guest memory.
9560 */
9561IEM_STATIC VBOXSTRICTRC
9562iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9563{
9564 /*
9565 * The SIDT and SGDT instructions actually store the data using two
9566 * independent writes.  These instructions do not respond to operand-size prefixes.
9567 */
9568 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9569 if (rcStrict == VINF_SUCCESS)
9570 {
9571 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9572 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9573 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9574 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9575 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9576 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9577 else
9578 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9579 }
9580 return rcStrict;
9581}
9582
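/** @par Example (illustrative, not from the original source)
 *  For SGDT in 64-bit mode the helper above ends up writing 10 bytes: the
 *  16-bit limit at the effective address followed by the 64-bit base at
 *  effective address + 2.  A hypothetical call site could look like this:
 *  @code
 *      // GCPtrEff would come from the decoded memory operand.
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt,
 *                                                  X86_SREG_DS, GCPtrEff);
 *  @endcode
 */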
9583
9584/**
9585 * Pushes a word onto the stack.
9586 *
9587 * @returns Strict VBox status code.
9588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9589 * @param u16Value The value to push.
9590 */
9591IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9592{
9593 /* Decrement the stack pointer. */
9594 uint64_t uNewRsp;
9595 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9596 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9597
9598 /* Write the word the lazy way. */
9599 uint16_t *pu16Dst;
9600 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9601 if (rc == VINF_SUCCESS)
9602 {
9603 *pu16Dst = u16Value;
9604 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9605 }
9606
9607 /* Commit the new RSP value unless an access handler made trouble. */
9608 if (rc == VINF_SUCCESS)
9609 pCtx->rsp = uNewRsp;
9610
9611 return rc;
9612}
9613
9614
9615/**
9616 * Pushes a dword onto the stack.
9617 *
9618 * @returns Strict VBox status code.
9619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9620 * @param u32Value The value to push.
9621 */
9622IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9623{
9624 /* Decrement the stack pointer. */
9625 uint64_t uNewRsp;
9626 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9627 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9628
9629 /* Write the dword the lazy way. */
9630 uint32_t *pu32Dst;
9631 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9632 if (rc == VINF_SUCCESS)
9633 {
9634 *pu32Dst = u32Value;
9635 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9636 }
9637
9638 /* Commit the new RSP value unless an access handler made trouble. */
9639 if (rc == VINF_SUCCESS)
9640 pCtx->rsp = uNewRsp;
9641
9642 return rc;
9643}
9644
9645
9646/**
9647 * Pushes a dword segment register value onto the stack.
9648 *
9649 * @returns Strict VBox status code.
9650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9651 * @param u32Value The value to push.
9652 */
9653IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9654{
9655 /* Decrement the stack pointer. */
9656 uint64_t uNewRsp;
9657 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9658 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9659
9660 VBOXSTRICTRC rc;
9661 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9662 {
9663 /* The recompiler writes a full dword. */
9664 uint32_t *pu32Dst;
9665 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9666 if (rc == VINF_SUCCESS)
9667 {
9668 *pu32Dst = u32Value;
9669 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9670 }
9671 }
9672 else
9673 {
9674 /* The Intel docs talk about zero extending the selector register
9675 value.  My actual Intel CPU here might be zero extending the value,
9676 but it still only writes the lower word... */
9677 /** @todo Test this on newer HW, on AMD, and in 64-bit mode.  Also test what
9678 * happens when crossing a page boundary: is the high word checked
9679 * for write accessibility or not?  Probably it is.  What about segment limits?
9680 * It appears this behavior is also shared with trap error codes.
9681 *
9682 * Docs indicate the behavior changed maybe in the Pentium or Pentium Pro.
9683 * Check ancient hardware to see when it actually did change. */
9684 uint16_t *pu16Dst;
9685 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9686 if (rc == VINF_SUCCESS)
9687 {
9688 *pu16Dst = (uint16_t)u32Value;
9689 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9690 }
9691 }
9692
9693 /* Commit the new RSP value unless an access handler made trouble. */
9694 if (rc == VINF_SUCCESS)
9695 pCtx->rsp = uNewRsp;
9696
9697 return rc;
9698}
9699
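/** @par Example (illustrative)
 *  For a 32-bit "push es" the stack pointer is dropped by 4, but as the code
 *  above shows, only the low word of the slot is written on the non-REM path;
 *  the high word of the slot is left as it was (the mapping is read-write):
 *  @code
 *      // before:  ESP = 0x1000
 *      // after:   ESP = 0x0FFC, word at SS:0x0FFC = ES selector,
 *      //          word at SS:0x0FFE = whatever was there before
 *  @endcode
 */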
9700
9701/**
9702 * Pushes a qword onto the stack.
9703 *
9704 * @returns Strict VBox status code.
9705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9706 * @param u64Value The value to push.
9707 */
9708IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9709{
9710 /* Decrement the stack pointer. */
9711 uint64_t uNewRsp;
9712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9713 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9714
9715 /* Write the qword the lazy way. */
9716 uint64_t *pu64Dst;
9717 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9718 if (rc == VINF_SUCCESS)
9719 {
9720 *pu64Dst = u64Value;
9721 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9722 }
9723
9724 /* Commit the new RSP value unless an access handler made trouble. */
9725 if (rc == VINF_SUCCESS)
9726 pCtx->rsp = uNewRsp;
9727
9728 return rc;
9729}
9730
9731
9732/**
9733 * Pops a word from the stack.
9734 *
9735 * @returns Strict VBox status code.
9736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9737 * @param pu16Value Where to store the popped value.
9738 */
9739IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9740{
9741 /* Increment the stack pointer. */
9742 uint64_t uNewRsp;
9743 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9744 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9745
9746 /* Read the word the lazy way. */
9747 uint16_t const *pu16Src;
9748 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9749 if (rc == VINF_SUCCESS)
9750 {
9751 *pu16Value = *pu16Src;
9752 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9753
9754 /* Commit the new RSP value. */
9755 if (rc == VINF_SUCCESS)
9756 pCtx->rsp = uNewRsp;
9757 }
9758
9759 return rc;
9760}
9761
9762
9763/**
9764 * Pops a dword from the stack.
9765 *
9766 * @returns Strict VBox status code.
9767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9768 * @param pu32Value Where to store the popped value.
9769 */
9770IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9771{
9772 /* Increment the stack pointer. */
9773 uint64_t uNewRsp;
9774 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9775 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9776
9777 /* Read the dword the lazy way. */
9778 uint32_t const *pu32Src;
9779 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9780 if (rc == VINF_SUCCESS)
9781 {
9782 *pu32Value = *pu32Src;
9783 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9784
9785 /* Commit the new RSP value. */
9786 if (rc == VINF_SUCCESS)
9787 pCtx->rsp = uNewRsp;
9788 }
9789
9790 return rc;
9791}
9792
9793
9794/**
9795 * Pops a qword from the stack.
9796 *
9797 * @returns Strict VBox status code.
9798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9799 * @param pu64Value Where to store the popped value.
9800 */
9801IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9802{
9803 /* Increment the stack pointer. */
9804 uint64_t uNewRsp;
9805 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9806 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9807
9808 /* Read the qword the lazy way. */
9809 uint64_t const *pu64Src;
9810 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9811 if (rc == VINF_SUCCESS)
9812 {
9813 *pu64Value = *pu64Src;
9814 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9815
9816 /* Commit the new RSP value. */
9817 if (rc == VINF_SUCCESS)
9818 pCtx->rsp = uNewRsp;
9819 }
9820
9821 return rc;
9822}
9823
9824
9825/**
9826 * Pushes a word onto the stack, using a temporary stack pointer.
9827 *
9828 * @returns Strict VBox status code.
9829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9830 * @param u16Value The value to push.
9831 * @param pTmpRsp Pointer to the temporary stack pointer.
9832 */
9833IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9834{
9835 /* Decrement the stack pointer. */
9836 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9837 RTUINT64U NewRsp = *pTmpRsp;
9838 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9839
9840 /* Write the word the lazy way. */
9841 uint16_t *pu16Dst;
9842 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9843 if (rc == VINF_SUCCESS)
9844 {
9845 *pu16Dst = u16Value;
9846 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9847 }
9848
9849 /* Commit the new RSP value unless an access handler made trouble. */
9850 if (rc == VINF_SUCCESS)
9851 *pTmpRsp = NewRsp;
9852
9853 return rc;
9854}
9855
9856
9857/**
9858 * Pushes a dword onto the stack, using a temporary stack pointer.
9859 *
9860 * @returns Strict VBox status code.
9861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9862 * @param u32Value The value to push.
9863 * @param pTmpRsp Pointer to the temporary stack pointer.
9864 */
9865IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9866{
9867 /* Decrement the stack pointer. */
9868 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9869 RTUINT64U NewRsp = *pTmpRsp;
9870 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9871
9872 /* Write the dword the lazy way. */
9873 uint32_t *pu32Dst;
9874 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9875 if (rc == VINF_SUCCESS)
9876 {
9877 *pu32Dst = u32Value;
9878 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9879 }
9880
9881 /* Commit the new RSP value unless an access handler made trouble. */
9882 if (rc == VINF_SUCCESS)
9883 *pTmpRsp = NewRsp;
9884
9885 return rc;
9886}
9887
9888
9889/**
9890 * Pushes a qword onto the stack, using a temporary stack pointer.
9891 *
9892 * @returns Strict VBox status code.
9893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9894 * @param u64Value The value to push.
9895 * @param pTmpRsp Pointer to the temporary stack pointer.
9896 */
9897IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9898{
9899 /* Decrement the stack pointer. */
9900 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9901 RTUINT64U NewRsp = *pTmpRsp;
9902 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9903
9904 /* Write the qword the lazy way. */
9905 uint64_t *pu64Dst;
9906 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9907 if (rc == VINF_SUCCESS)
9908 {
9909 *pu64Dst = u64Value;
9910 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9911 }
9912
9913 /* Commit the new RSP value unless an access handler made trouble. */
9914 if (rc == VINF_SUCCESS)
9915 *pTmpRsp = NewRsp;
9916
9917 return rc;
9918}
9919
9920
9921/**
9922 * Pops a word from the stack, using a temporary stack pointer.
9923 *
9924 * @returns Strict VBox status code.
9925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9926 * @param pu16Value Where to store the popped value.
9927 * @param pTmpRsp Pointer to the temporary stack pointer.
9928 */
9929IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9930{
9931 /* Increment the stack pointer. */
9932 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9933 RTUINT64U NewRsp = *pTmpRsp;
9934 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9935
9936 /* Read the word the lazy way. */
9937 uint16_t const *pu16Src;
9938 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9939 if (rc == VINF_SUCCESS)
9940 {
9941 *pu16Value = *pu16Src;
9942 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9943
9944 /* Commit the new RSP value. */
9945 if (rc == VINF_SUCCESS)
9946 *pTmpRsp = NewRsp;
9947 }
9948
9949 return rc;
9950}
9951
9952
9953/**
9954 * Pops a dword from the stack, using a temporary stack pointer.
9955 *
9956 * @returns Strict VBox status code.
9957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9958 * @param pu32Value Where to store the popped value.
9959 * @param pTmpRsp Pointer to the temporary stack pointer.
9960 */
9961IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9962{
9963 /* Increment the stack pointer. */
9964 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9965 RTUINT64U NewRsp = *pTmpRsp;
9966 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9967
9968 /* Read the dword the lazy way. */
9969 uint32_t const *pu32Src;
9970 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9971 if (rc == VINF_SUCCESS)
9972 {
9973 *pu32Value = *pu32Src;
9974 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9975
9976 /* Commit the new RSP value. */
9977 if (rc == VINF_SUCCESS)
9978 *pTmpRsp = NewRsp;
9979 }
9980
9981 return rc;
9982}
9983
9984
9985/**
9986 * Pops a qword from the stack, using a temporary stack pointer.
9987 *
9988 * @returns Strict VBox status code.
9989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9990 * @param pu64Value Where to store the popped value.
9991 * @param pTmpRsp Pointer to the temporary stack pointer.
9992 */
9993IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9994{
9995 /* Increment the stack pointer. */
9996 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9997 RTUINT64U NewRsp = *pTmpRsp;
9998 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9999
10000 /* Read the qword the lazy way. */
10001 uint64_t const *pu64Src;
10002 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10003 if (rcStrict == VINF_SUCCESS)
10004 {
10005 *pu64Value = *pu64Src;
10006 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10007
10008 /* Commit the new RSP value. */
10009 if (rcStrict == VINF_SUCCESS)
10010 *pTmpRsp = NewRsp;
10011 }
10012
10013 return rcStrict;
10014}
10015
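/** @par Usage sketch (illustrative, not from the original source)
 *  The *Ex variants operate on a caller-provided temporary RSP, so a sequence
 *  of stack operations can be abandoned simply by not committing it.  A
 *  minimal sketch pushing two hypothetical words and committing only on success:
 *  @code
 *      RTUINT64U    TmpRsp;
 *      TmpRsp.u = pCtx->rsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, u16Value1, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPushU16Ex(pVCpu, u16Value2, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pCtx->rsp = TmpRsp.u;       // commit RSP only if both pushes succeeded
 *  @endcode
 */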
10016
10017/**
10018 * Begin a special stack push (used by interrupts, exceptions and such).
10019 *
10020 * This will raise \#SS or \#PF if appropriate.
10021 *
10022 * @returns Strict VBox status code.
10023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10024 * @param cbMem The number of bytes to push onto the stack.
10025 * @param ppvMem Where to return the pointer to the stack memory.
10026 * As with the other memory functions this could be
10027 * direct access or bounce buffered access, so
10028 * don't commit the register update until the commit call
10029 * succeeds.
10030 * @param puNewRsp Where to return the new RSP value. This must be
10031 * passed unchanged to
10032 * iemMemStackPushCommitSpecial().
10033 */
10034IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10035{
10036 Assert(cbMem < UINT8_MAX);
10037 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10038 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10039 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10040}
10041
10042
10043/**
10044 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10045 *
10046 * This will update the rSP.
10047 *
10048 * @returns Strict VBox status code.
10049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10050 * @param pvMem The pointer returned by
10051 * iemMemStackPushBeginSpecial().
10052 * @param uNewRsp The new RSP value returned by
10053 * iemMemStackPushBeginSpecial().
10054 */
10055IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10056{
10057 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10058 if (rcStrict == VINF_SUCCESS)
10059 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10060 return rcStrict;
10061}
10062
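/** @par Usage sketch (illustrative, not from the original source)
 *  Exception and interrupt dispatching maps the whole stack frame in one go,
 *  fills it in, and only then commits RSP.  A minimal sketch for a hypothetical
 *  8-byte frame:
 *  @code
 *      uint64_t    *pu64Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pu64Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu64Frame[0] = uErrCd;          // hypothetical value being pushed
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu64Frame, uNewRsp);
 *  @endcode
 */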
10063
10064/**
10065 * Begin a special stack pop (used by iret, retf and such).
10066 *
10067 * This will raise \#SS or \#PF if appropriate.
10068 *
10069 * @returns Strict VBox status code.
10070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10071 * @param cbMem The number of bytes to pop from the stack.
10072 * @param ppvMem Where to return the pointer to the stack memory.
10073 * @param puNewRsp Where to return the new RSP value. This must be
10074 * assigned to CPUMCTX::rsp manually some time
10075 * after iemMemStackPopDoneSpecial() has been
10076 * called.
10077 */
10078IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10079{
10080 Assert(cbMem < UINT8_MAX);
10081 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10082 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10083 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10084}
10085
10086
10087/**
10088 * Continue a special stack pop (used by iret and retf).
10089 *
10090 * This will raise \#SS or \#PF if appropriate.
10091 *
10092 * @returns Strict VBox status code.
10093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10094 * @param cbMem The number of bytes to pop from the stack.
10095 * @param ppvMem Where to return the pointer to the stack memory.
10096 * @param puNewRsp Where to return the new RSP value. This must be
10097 * assigned to CPUMCTX::rsp manually some time
10098 * after iemMemStackPopDoneSpecial() has been
10099 * called.
10100 */
10101IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10102{
10103 Assert(cbMem < UINT8_MAX);
10104 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10105 RTUINT64U NewRsp;
10106 NewRsp.u = *puNewRsp;
10107 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10108 *puNewRsp = NewRsp.u;
10109 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10110}
10111
10112
10113/**
10114 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10115 * iemMemStackPopContinueSpecial).
10116 *
10117 * The caller will manually commit the rSP.
10118 *
10119 * @returns Strict VBox status code.
10120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10121 * @param pvMem The pointer returned by
10122 * iemMemStackPopBeginSpecial() or
10123 * iemMemStackPopContinueSpecial().
10124 */
10125IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10126{
10127 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10128}
10129
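/** @par Usage sketch (illustrative, not from the original source)
 *  iret/retf style code maps the return frame, copies out what it needs, unmaps
 *  it, and only commits the new RSP once everything has been validated.  A
 *  minimal sketch popping three hypothetical qwords:
 *  @code
 *      uint64_t const *pau64Frame;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * 8, (void const **)&pau64Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint64_t const uNewRip   = pau64Frame[0];
 *      uint64_t const uNewCs    = pau64Frame[1];
 *      uint64_t const uNewFlags = pau64Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau64Frame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pCtx->rsp = uNewRsp;        // the caller commits RSP manually after validating the values
 *  @endcode
 */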
10130
10131/**
10132 * Fetches a system table byte.
10133 *
10134 * @returns Strict VBox status code.
10135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10136 * @param pbDst Where to return the byte.
10137 * @param iSegReg The index of the segment register to use for
10138 * this access. The base and limits are checked.
10139 * @param GCPtrMem The address of the guest memory.
10140 */
10141IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10142{
10143 /* The lazy approach for now... */
10144 uint8_t const *pbSrc;
10145 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10146 if (rc == VINF_SUCCESS)
10147 {
10148 *pbDst = *pbSrc;
10149 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10150 }
10151 return rc;
10152}
10153
10154
10155/**
10156 * Fetches a system table word.
10157 *
10158 * @returns Strict VBox status code.
10159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10160 * @param pu16Dst Where to return the word.
10161 * @param iSegReg The index of the segment register to use for
10162 * this access. The base and limits are checked.
10163 * @param GCPtrMem The address of the guest memory.
10164 */
10165IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10166{
10167 /* The lazy approach for now... */
10168 uint16_t const *pu16Src;
10169 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10170 if (rc == VINF_SUCCESS)
10171 {
10172 *pu16Dst = *pu16Src;
10173 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10174 }
10175 return rc;
10176}
10177
10178
10179/**
10180 * Fetches a system table dword.
10181 *
10182 * @returns Strict VBox status code.
10183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10184 * @param pu32Dst Where to return the dword.
10185 * @param iSegReg The index of the segment register to use for
10186 * this access. The base and limits are checked.
10187 * @param GCPtrMem The address of the guest memory.
10188 */
10189IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10190{
10191 /* The lazy approach for now... */
10192 uint32_t const *pu32Src;
10193 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10194 if (rc == VINF_SUCCESS)
10195 {
10196 *pu32Dst = *pu32Src;
10197 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10198 }
10199 return rc;
10200}
10201
10202
10203/**
10204 * Fetches a system table qword.
10205 *
10206 * @returns Strict VBox status code.
10207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10208 * @param pu64Dst Where to return the qword.
10209 * @param iSegReg The index of the segment register to use for
10210 * this access. The base and limits are checked.
10211 * @param GCPtrMem The address of the guest memory.
10212 */
10213IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10214{
10215 /* The lazy approach for now... */
10216 uint64_t const *pu64Src;
10217 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10218 if (rc == VINF_SUCCESS)
10219 {
10220 *pu64Dst = *pu64Src;
10221 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10222 }
10223 return rc;
10224}
10225
10226
10227/**
10228 * Fetches a descriptor table entry with caller specified error code.
10229 *
10230 * @returns Strict VBox status code.
10231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10232 * @param pDesc Where to return the descriptor table entry.
10233 * @param uSel The selector which table entry to fetch.
10234 * @param uXcpt The exception to raise on table lookup error.
10235 * @param uErrorCode The error code associated with the exception.
10236 */
10237IEM_STATIC VBOXSTRICTRC
10238iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10239{
10240 AssertPtr(pDesc);
10241 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10242
10243 /** @todo did the 286 require all 8 bytes to be accessible? */
10244 /*
10245 * Get the selector table base and check bounds.
10246 */
10247 RTGCPTR GCPtrBase;
10248 if (uSel & X86_SEL_LDT)
10249 {
10250 if ( !pCtx->ldtr.Attr.n.u1Present
10251 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10252 {
10253 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10254 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10255 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10256 uErrorCode, 0);
10257 }
10258
10259 Assert(pCtx->ldtr.Attr.n.u1Present);
10260 GCPtrBase = pCtx->ldtr.u64Base;
10261 }
10262 else
10263 {
10264 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10265 {
10266 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10267 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10268 uErrorCode, 0);
10269 }
10270 GCPtrBase = pCtx->gdtr.pGdt;
10271 }
10272
10273 /*
10274 * Read the legacy descriptor and maybe the long mode extensions if
10275 * required.
10276 */
10277 VBOXSTRICTRC rcStrict;
10278 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10279 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10280 else
10281 {
10282 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10283 if (rcStrict == VINF_SUCCESS)
10284 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10285 if (rcStrict == VINF_SUCCESS)
10286 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10287 if (rcStrict == VINF_SUCCESS)
10288 pDesc->Legacy.au16[3] = 0;
10289 else
10290 return rcStrict;
10291 }
10292
10293 if (rcStrict == VINF_SUCCESS)
10294 {
10295 if ( !IEM_IS_LONG_MODE(pVCpu)
10296 || pDesc->Legacy.Gen.u1DescType)
10297 pDesc->Long.au64[1] = 0;
10298 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10299 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10300 else
10301 {
10302 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10303 /** @todo is this the right exception? */
10304 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10305 }
10306 }
10307 return rcStrict;
10308}
10309
10310
10311/**
10312 * Fetches a descriptor table entry.
10313 *
10314 * @returns Strict VBox status code.
10315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10316 * @param pDesc Where to return the descriptor table entry.
10317 * @param uSel The selector which table entry to fetch.
10318 * @param uXcpt The exception to raise on table lookup error.
10319 */
10320IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10321{
10322 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10323}
10324
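/** @par Usage sketch (illustrative, not from the original source)
 *  Selector load code typically fetches the descriptor and then validates it;
 *  the check below is just one example of such validation, with uNewCs being a
 *  hypothetical selector:
 *  @code
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewCs, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present) // illustrative check; real code raises the appropriate exception
 *          return iemRaiseGeneralProtectionFault0(pVCpu);
 *  @endcode
 */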
10325
10326/**
10327 * Fakes a long mode stack selector for SS = 0.
10328 *
10329 * @param pDescSs Where to return the fake stack descriptor.
10330 * @param uDpl The DPL we want.
10331 */
10332IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10333{
10334 pDescSs->Long.au64[0] = 0;
10335 pDescSs->Long.au64[1] = 0;
10336 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10337 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10338 pDescSs->Long.Gen.u2Dpl = uDpl;
10339 pDescSs->Long.Gen.u1Present = 1;
10340 pDescSs->Long.Gen.u1Long = 1;
10341}
10342
10343
10344/**
10345 * Marks the selector descriptor as accessed (only non-system descriptors).
10346 *
10347 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10348 * will therefore skip the limit checks.
10349 *
10350 * @returns Strict VBox status code.
10351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10352 * @param uSel The selector.
10353 */
10354IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10355{
10356 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10357
10358 /*
10359 * Get the selector table base and calculate the entry address.
10360 */
10361 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10362 ? pCtx->ldtr.u64Base
10363 : pCtx->gdtr.pGdt;
10364 GCPtr += uSel & X86_SEL_MASK;
10365
10366 /*
10367 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10368 * ugly stuff to avoid this.  This will make sure it's an atomic access
10369 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10370 */
10371 VBOXSTRICTRC rcStrict;
10372 uint32_t volatile *pu32;
10373 if ((GCPtr & 3) == 0)
10374 {
10375 /* The normal case: map the 32 bits around the accessed bit (40). */
10376 GCPtr += 2 + 2;
10377 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10378 if (rcStrict != VINF_SUCCESS)
10379 return rcStrict;
10380 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10381 }
10382 else
10383 {
10384 /* The misaligned GDT/LDT case, map the whole thing. */
10385 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10386 if (rcStrict != VINF_SUCCESS)
10387 return rcStrict;
10388 switch ((uintptr_t)pu32 & 3)
10389 {
10390 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10391 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10392 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10393 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10394 }
10395 }
10396
10397 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10398}
10399
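/** @par Worked example (illustrative)
 *  For an aligned descriptor at GCPtr the accessed bit is bit 40 of the 8-byte
 *  entry.  Mapping the dword at GCPtr + 4 turns it into bit 40 - 32 = 8 of that
 *  dword, which is exactly what the aligned path above does:
 *  @code
 *      ASMAtomicBitSet(pu32, 40 - 32);     // same as the ASMAtomicBitSet(pu32, 8) above
 *  @endcode
 */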
10400/** @} */
10401
10402
10403/*
10404 * Include the C/C++ implementation of instruction.
10405 */
10406#include "IEMAllCImpl.cpp.h"
10407
10408
10409
10410/** @name "Microcode" macros.
10411 *
10412 * The idea is that we should be able to use the same code to interpret
10413 * instructions as well as to recompile them.  Thus this obfuscation.
10414 *
10415 * @{
10416 */
10417#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10418#define IEM_MC_END() }
10419#define IEM_MC_PAUSE() do {} while (0)
10420#define IEM_MC_CONTINUE() do {} while (0)
10421
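/** @par Example (illustrative sketch, not from the original source)
 *  The opcode decoders string the IEM_MC_* macros together to form instruction
 *  bodies.  A hypothetical register-to-register body might look like this, with
 *  the macros expanding to plain C for the interpreter (see the definitions
 *  below) and potentially to something else for a recompiler:
 *  @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *  @endcode
 */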
10422/** Internal macro. */
10423#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10424 do \
10425 { \
10426 VBOXSTRICTRC rcStrict2 = a_Expr; \
10427 if (rcStrict2 != VINF_SUCCESS) \
10428 return rcStrict2; \
10429 } while (0)
10430
10431
10432#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10433#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10434#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10435#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10436#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10437#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10438#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10439#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10440#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10441 do { \
10442 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10443 return iemRaiseDeviceNotAvailable(pVCpu); \
10444 } while (0)
10445#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10446 do { \
10447 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10448 return iemRaiseMathFault(pVCpu); \
10449 } while (0)
10450#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10451 do { \
10452 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10453 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10454 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10455 return iemRaiseUndefinedOpcode(pVCpu); \
10456 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10457 return iemRaiseDeviceNotAvailable(pVCpu); \
10458 } while (0)
10459#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10460 do { \
10461 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10462 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10463 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10464 return iemRaiseUndefinedOpcode(pVCpu); \
10465 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10466 return iemRaiseDeviceNotAvailable(pVCpu); \
10467 } while (0)
10468#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10469 do { \
10470 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10471 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10472 return iemRaiseUndefinedOpcode(pVCpu); \
10473 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10474 return iemRaiseDeviceNotAvailable(pVCpu); \
10475 } while (0)
10476#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10477 do { \
10478 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10479 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10480 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10481 return iemRaiseUndefinedOpcode(pVCpu); \
10482 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10483 return iemRaiseDeviceNotAvailable(pVCpu); \
10484 } while (0)
10485#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10486 do { \
10487 if (pVCpu->iem.s.uCpl != 0) \
10488 return iemRaiseGeneralProtectionFault0(pVCpu); \
10489 } while (0)
10490#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10491 do { \
10492 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10493 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10494 } while (0)
10495
10496
10497#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10498#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10499#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10500#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10501#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10502#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10503#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10504 uint32_t a_Name; \
10505 uint32_t *a_pName = &a_Name
10506#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10507 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10508
10509#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10510#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10511
10512#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10513#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10514#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10515#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10516#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10517#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10518#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10519#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10520#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10521#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10522#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10523#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10524#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10525#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10526#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10527#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10528#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10529#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10530#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10531#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10532#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10533#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10534#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10535#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10536#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10537#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10538#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10539#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10540#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10541/** @note Not for IOPL or IF testing or modification. */
10542#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10543#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10544#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10545#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10546
10547#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10548#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10549#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10550#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10551#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10552#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10553#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10554#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10555#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10556#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10557#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10558 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10559
10560#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10561#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10562/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10563 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10564#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10565#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10566/** @note Not for IOPL or IF testing or modification. */
10567#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10568
10569#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10570#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10571#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10572 do { \
10573 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10574 *pu32Reg += (a_u32Value); \
10575 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10576 } while (0)
10577#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10578
10579#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10580#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10581#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10582 do { \
10583 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10584 *pu32Reg -= (a_u32Value); \
10585 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10586 } while (0)
10587#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10588#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10589
10590#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10591#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10592#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10593#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10594#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10595#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10596#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10597
10598#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10599#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10600#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10601#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10602
10603#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10604#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10605#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10606
10607#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10608#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10609#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10610
10611#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10612#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10613#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10614
10615#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10616#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10617#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10618
10619#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10620
10621#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10622
10623#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10624#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10625#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10626 do { \
10627 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10628 *pu32Reg &= (a_u32Value); \
10629 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10630 } while (0)
10631#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10632
10633#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10634#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10635#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10636 do { \
10637 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10638 *pu32Reg |= (a_u32Value); \
10639 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10640 } while (0)
10641#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10642
10643
10644/** @note Not for IOPL or IF modification. */
10645#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10646/** @note Not for IOPL or IF modification. */
10647#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10648/** @note Not for IOPL or IF modification. */
10649#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10650
10651#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10652
10653
10654#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10655 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10656#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10657 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10658#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10659 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10660#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10661 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10662#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10663 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10664#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10665 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10666#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10667 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10668
10669#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10670 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10671#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10672 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10673#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10674 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10675#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10676 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10677#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10678 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10679#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10680 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10681 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10682 } while (0)
10683#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10684 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10685 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10686 } while (0)
10687#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10688 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10689#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10690 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10691#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10692 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10693#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10694 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10695 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10696
10697#ifndef IEM_WITH_SETJMP
10698# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10700# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10702# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10703 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10704#else
10705# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10706 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10707# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10708 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10709# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10710 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10711#endif
10712
10713#ifndef IEM_WITH_SETJMP
10714# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10716# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10718# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10720#else
10721# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10722 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10723# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10724 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10725# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10726 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10727#endif
10728
10729#ifndef IEM_WITH_SETJMP
10730# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10732# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10734# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10736#else
10737# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10738 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10739# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10740 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10741# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10742 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10743#endif
10744
10745#ifdef SOME_UNUSED_FUNCTION
10746# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10748#endif
10749
10750#ifndef IEM_WITH_SETJMP
10751# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10753# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10755# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10757# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10758 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10759#else
10760# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10761 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10762# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10763 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10764# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10765 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10766# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10767 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10768#endif
10769
10770#ifndef IEM_WITH_SETJMP
10771# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10773# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10775# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10776 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10777#else
10778# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10779 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10780# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10781 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10782# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10783 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10784#endif
10785
10786#ifndef IEM_WITH_SETJMP
10787# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10789# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10790 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10791#else
10792# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10793 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10794# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10795 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10796#endif
10797
10798
10799
10800#ifndef IEM_WITH_SETJMP
10801# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10802 do { \
10803 uint8_t u8Tmp; \
10804 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10805 (a_u16Dst) = u8Tmp; \
10806 } while (0)
10807# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10808 do { \
10809 uint8_t u8Tmp; \
10810 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10811 (a_u32Dst) = u8Tmp; \
10812 } while (0)
10813# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10814 do { \
10815 uint8_t u8Tmp; \
10816 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10817 (a_u64Dst) = u8Tmp; \
10818 } while (0)
10819# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10820 do { \
10821 uint16_t u16Tmp; \
10822 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10823 (a_u32Dst) = u16Tmp; \
10824 } while (0)
10825# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10826 do { \
10827 uint16_t u16Tmp; \
10828 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10829 (a_u64Dst) = u16Tmp; \
10830 } while (0)
10831# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10832 do { \
10833 uint32_t u32Tmp; \
10834 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10835 (a_u64Dst) = u32Tmp; \
10836 } while (0)
10837#else /* IEM_WITH_SETJMP */
10838# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10839 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10840# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10841 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10842# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10843 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10844# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10845 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10846# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10847 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10848# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10849 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10850#endif /* IEM_WITH_SETJMP */
10851
10852#ifndef IEM_WITH_SETJMP
10853# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10854 do { \
10855 uint8_t u8Tmp; \
10856 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10857 (a_u16Dst) = (int8_t)u8Tmp; \
10858 } while (0)
10859# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10860 do { \
10861 uint8_t u8Tmp; \
10862 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10863 (a_u32Dst) = (int8_t)u8Tmp; \
10864 } while (0)
10865# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10866 do { \
10867 uint8_t u8Tmp; \
10868 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10869 (a_u64Dst) = (int8_t)u8Tmp; \
10870 } while (0)
10871# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10872 do { \
10873 uint16_t u16Tmp; \
10874 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10875 (a_u32Dst) = (int16_t)u16Tmp; \
10876 } while (0)
10877# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10878 do { \
10879 uint16_t u16Tmp; \
10880 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10881 (a_u64Dst) = (int16_t)u16Tmp; \
10882 } while (0)
10883# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10884 do { \
10885 uint32_t u32Tmp; \
10886 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10887 (a_u64Dst) = (int32_t)u32Tmp; \
10888 } while (0)
10889#else /* IEM_WITH_SETJMP */
10890# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10891 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10892# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10893 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10894# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10895 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10896# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10897 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10898# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10899 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10900# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10901 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10902#endif /* IEM_WITH_SETJMP */
10903
10904#ifndef IEM_WITH_SETJMP
10905# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10906 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10907# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10908 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10909# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10910 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10911# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10912 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10913#else
10914# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10915 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10916# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10917 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10918# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10919 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10920# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10921 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10922#endif
10923
10924#ifndef IEM_WITH_SETJMP
10925# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10926 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10927# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10928 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10929# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10930 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10931# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10932 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10933#else
10934# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10935 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10936# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10937 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10938# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10939 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10940# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10941 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10942#endif
10943
10944#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10945#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10946#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10947#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10948#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10949#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10950#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10951 do { \
10952 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10953 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10954 } while (0)
10955
10956#ifndef IEM_WITH_SETJMP
10957# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10958 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10959# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10960 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10961#else
10962# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10963 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10964# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10965 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10966#endif
10967
10968
10969#define IEM_MC_PUSH_U16(a_u16Value) \
10970 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10971#define IEM_MC_PUSH_U32(a_u32Value) \
10972 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10973#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10974 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10975#define IEM_MC_PUSH_U64(a_u64Value) \
10976 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10977
10978#define IEM_MC_POP_U16(a_pu16Value) \
10979 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10980#define IEM_MC_POP_U32(a_pu32Value) \
10981 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10982#define IEM_MC_POP_U64(a_pu64Value) \
10983 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10984
10985/** Maps guest memory for direct or bounce buffered access.
10986 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10987 * @remarks May return.
10988 */
10989#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10990 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10991
10992/** Maps guest memory for direct or bounce buffered access.
10993 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10994 * @remarks May return.
10995 */
10996#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10997 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10998
10999/** Commits the memory and unmaps the guest memory.
11000 * @remarks May return.
11001 */
11002#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11003 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11004
11005/** Commits the memory and unmaps the guest memory unless the FPU status word
11006 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11007 * would cause FLD not to store.
11008 *
11009 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11010 * store, while \#P will not.
11011 *
11012 * @remarks May in theory return - for now.
11013 */
11014#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11015 do { \
11016 if ( !(a_u16FSW & X86_FSW_ES) \
11017 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11018 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11019 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11020 } while (0)
11021
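/* A minimal usage sketch (not taken verbatim from any instruction body): a
 * read-modify-write memory operand is typically mapped, handed to a worker,
 * and then committed.  The IEM_MC_BEGIN/ARG/LOCAL/ADVANCE_RIP/END macros and
 * the iemAImpl_add_u32 worker are assumed from elsewhere in IEM, so the exact
 * plumbing may differ:
 *
 *     IEM_MC_BEGIN(3, 2);
 *     IEM_MC_ARG(uint32_t *,      pu32Dst,            0);
 *     IEM_MC_ARG(uint32_t,        u32Src,             1);
 *     IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
 *     IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
 *
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_FETCH_EFLAGS(EFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *     IEM_MC_COMMIT_EFLAGS(EFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * iGRegSrc stands in for the register index decoded from the ModR/M byte.
 */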
11022/** Calculate the effective address from R/M. */
11023#ifndef IEM_WITH_SETJMP
11024# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11025 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11026#else
11027# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11028 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11029#endif
11030
11031#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11032#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11033#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11034#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11035#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11036#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11037#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11038
11039/**
11040 * Defers the rest of the instruction emulation to a C implementation routine
11041 * and returns, only taking the standard parameters.
11042 *
11043 * @param a_pfnCImpl The pointer to the C routine.
11044 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11045 */
11046#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11047
11048/**
11049 * Defers the rest of instruction emulation to a C implementation routine and
11050 * returns, taking one argument in addition to the standard ones.
11051 *
11052 * @param a_pfnCImpl The pointer to the C routine.
11053 * @param a0 The argument.
11054 */
11055#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11056
11057/**
11058 * Defers the rest of the instruction emulation to a C implementation routine
11059 * and returns, taking two arguments in addition to the standard ones.
11060 *
11061 * @param a_pfnCImpl The pointer to the C routine.
11062 * @param a0 The first extra argument.
11063 * @param a1 The second extra argument.
11064 */
11065#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11066
11067/**
11068 * Defers the rest of the instruction emulation to a C implementation routine
11069 * and returns, taking three arguments in addition to the standard ones.
11070 *
11071 * @param a_pfnCImpl The pointer to the C routine.
11072 * @param a0 The first extra argument.
11073 * @param a1 The second extra argument.
11074 * @param a2 The third extra argument.
11075 */
11076#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11077
11078/**
11079 * Defers the rest of the instruction emulation to a C implementation routine
11080 * and returns, taking four arguments in addition to the standard ones.
11081 *
11082 * @param a_pfnCImpl The pointer to the C routine.
11083 * @param a0 The first extra argument.
11084 * @param a1 The second extra argument.
11085 * @param a2 The third extra argument.
11086 * @param a3 The fourth extra argument.
11087 */
11088#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11089
11090/**
11091 * Defers the rest of the instruction emulation to a C implementation routine
11092 * and returns, taking five arguments in addition to the standard ones.
11093 *
11094 * @param a_pfnCImpl The pointer to the C routine.
11095 * @param a0 The first extra argument.
11096 * @param a1 The second extra argument.
11097 * @param a2 The third extra argument.
11098 * @param a3 The fourth extra argument.
11099 * @param a4 The fifth extra argument.
11100 */
11101#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11102
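/* Sketch of a typical IEM_MC_CALL_CIMPL_N use: the MC block sets up the extra
 * arguments and the final CIMPL call both performs the work and returns from
 * the decoder function.  The worker name and argument set here are purely
 * illustrative, not an actual IEM worker:
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(uint16_t,        u16Value,       0);
 *     IEM_MC_ARG_CONST(uint8_t,   cbWidth, 4,     1);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xDX);
 *     IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, u16Value, cbWidth);
 *     IEM_MC_END();
 */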
11103/**
11104 * Defers the entire instruction emulation to a C implementation routine and
11105 * returns, only taking the standard parameters.
11106 *
11107 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11108 *
11109 * @param a_pfnCImpl The pointer to the C routine.
11110 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11111 */
11112#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11113
11114/**
11115 * Defers the entire instruction emulation to a C implementation routine and
11116 * returns, taking one argument in addition to the standard ones.
11117 *
11118 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11119 *
11120 * @param a_pfnCImpl The pointer to the C routine.
11121 * @param a0 The argument.
11122 */
11123#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11124
11125/**
11126 * Defers the entire instruction emulation to a C implementation routine and
11127 * returns, taking two arguments in addition to the standard ones.
11128 *
11129 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11130 *
11131 * @param a_pfnCImpl The pointer to the C routine.
11132 * @param a0 The first extra argument.
11133 * @param a1 The second extra argument.
11134 */
11135#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11136
11137/**
11138 * Defers the entire instruction emulation to a C implementation routine and
11139 * returns, taking three arguments in addition to the standard ones.
11140 *
11141 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11142 *
11143 * @param a_pfnCImpl The pointer to the C routine.
11144 * @param a0 The first extra argument.
11145 * @param a1 The second extra argument.
11146 * @param a2 The third extra argument.
11147 */
11148#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11149
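/* In contrast to IEM_MC_CALL_CIMPL_N above, the IEM_MC_DEFER_TO_CIMPL_N
 * variants are used on their own, without an IEM_MC_BEGIN/IEM_MC_END block.
 * A one-line decoder body along these lines (HLT being a plausible example;
 * the exact worker naming is taken on trust):
 *
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 */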
11150/**
11151 * Calls a FPU assembly implementation taking one visible argument.
11152 *
11153 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11154 * @param a0 The first extra argument.
11155 */
11156#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11157 do { \
11158 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11159 } while (0)
11160
11161/**
11162 * Calls a FPU assembly implementation taking two visible arguments.
11163 *
11164 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11165 * @param a0 The first extra argument.
11166 * @param a1 The second extra argument.
11167 */
11168#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11169 do { \
11170 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11171 } while (0)
11172
11173/**
11174 * Calls a FPU assembly implementation taking three visible arguments.
11175 *
11176 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11177 * @param a0 The first extra argument.
11178 * @param a1 The second extra argument.
11179 * @param a2 The third extra argument.
11180 */
11181#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11182 do { \
11183 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11184 } while (0)
11185
11186#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11187 do { \
11188 (a_FpuData).FSW = (a_FSW); \
11189 (a_FpuData).r80Result = *(a_pr80Value); \
11190 } while (0)
11191
11192/** Pushes FPU result onto the stack. */
11193#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11194 iemFpuPushResult(pVCpu, &a_FpuData)
11195/** Pushes FPU result onto the stack and sets the FPUDP. */
11196#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11197 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11198
11199/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11200#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11201 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11202
11203/** Stores FPU result in a stack register. */
11204#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11205 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11206/** Stores FPU result in a stack register and pops the stack. */
11207#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11208 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11209/** Stores FPU result in a stack register and sets the FPUDP. */
11210#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11211 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11212/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11213 * stack. */
11214#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11215 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11216
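/* Rough sketch of how the FPU call/result macros combine for an ST(0),ST(i)
 * style operation.  It is patterned loosely on the fadd-type helpers;
 * IEMFPURESULT, IEM_MC_ARG_LOCAL_REF and the iemAImpl_fadd_r80_by_r80 worker
 * are assumed from elsewhere in IEM, so treat this as illustrative only:
 *
 *     IEM_MC_BEGIN(3, 1);
 *     IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *         IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * iStReg stands in for the ST register index decoded from the ModR/M byte.
 */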
11217/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11218#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11219 iemFpuUpdateOpcodeAndIp(pVCpu)
11220/** Free a stack register (for FFREE and FFREEP). */
11221#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11222 iemFpuStackFree(pVCpu, a_iStReg)
11223/** Increment the FPU stack pointer. */
11224#define IEM_MC_FPU_STACK_INC_TOP() \
11225 iemFpuStackIncTop(pVCpu)
11226/** Decrement the FPU stack pointer. */
11227#define IEM_MC_FPU_STACK_DEC_TOP() \
11228 iemFpuStackDecTop(pVCpu)
11229
11230/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11231#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11232 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11233/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11234#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11235 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11236/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11237#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11238 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11239/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11240#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11241 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11242/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11243 * stack. */
11244#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11245 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11246/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11247#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11248 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11249
11250/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11251#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11252 iemFpuStackUnderflow(pVCpu, a_iStDst)
11253/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11254 * stack. */
11255#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11256 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11257/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11258 * FPUDS. */
11259#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11260 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11261/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11262 * FPUDS. Pops stack. */
11263#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11264 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11265/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11266 * stack twice. */
11267#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11268 iemFpuStackUnderflowThenPopPop(pVCpu)
11269/** Raises a FPU stack underflow exception for an instruction pushing a result
11270 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11271#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11272 iemFpuStackPushUnderflow(pVCpu)
11273/** Raises a FPU stack underflow exception for an instruction pushing a result
11274 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11275#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11276 iemFpuStackPushUnderflowTwo(pVCpu)
11277
11278/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11279 * FPUIP, FPUCS and FOP. */
11280#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11281 iemFpuStackPushOverflow(pVCpu)
11282/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11283 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11284#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11285 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11286/** Prepares for using the FPU state.
11287 * Ensures that we can use the host FPU in the current context (RC+R0).
11288 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11289#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11290/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
11291#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11292/** Actualizes the guest FPU state so it can be accessed and modified. */
11293#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11294
11295/** Prepares for using the SSE state.
11296 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11297 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11298#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11299/** Actualizes the guest XMM0..15 register state for read-only access. */
11300#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11301/** Actualizes the guest XMM0..15 register state for read-write access. */
11302#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11303
11304/**
11305 * Calls a MMX assembly implementation taking two visible arguments.
11306 *
11307 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11308 * @param a0 The first extra argument.
11309 * @param a1 The second extra argument.
11310 */
11311#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11312 do { \
11313 IEM_MC_PREPARE_FPU_USAGE(); \
11314 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11315 } while (0)
11316
11317/**
11318 * Calls a MMX assembly implementation taking three visible arguments.
11319 *
11320 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11321 * @param a0 The first extra argument.
11322 * @param a1 The second extra argument.
11323 * @param a2 The third extra argument.
11324 */
11325#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11326 do { \
11327 IEM_MC_PREPARE_FPU_USAGE(); \
11328 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11329 } while (0)
11330
11331
11332/**
11333 * Calls a SSE assembly implementation taking two visible arguments.
11334 *
11335 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11336 * @param a0 The first extra argument.
11337 * @param a1 The second extra argument.
11338 */
11339#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11340 do { \
11341 IEM_MC_PREPARE_SSE_USAGE(); \
11342 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11343 } while (0)
11344
11345/**
11346 * Calls a SSE assembly implementation taking three visible arguments.
11347 *
11348 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11349 * @param a0 The first extra argument.
11350 * @param a1 The second extra argument.
11351 * @param a2 The third extra argument.
11352 */
11353#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11354 do { \
11355 IEM_MC_PREPARE_SSE_USAGE(); \
11356 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11357 } while (0)
11358
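/* Sketch of an SSE binary operation body built from the macros above.  The
 * exception-check macro and the iemAImpl_pxor_u128 worker are assumed from
 * other parts of IEM; iXRegDst/iXRegSrc stand in for the register indices
 * decoded from the ModR/M byte:
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(uint128_t *,         pDst, 0);
 *     IEM_MC_ARG(uint128_t const *,   pSrc, 1);
 *     IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *     IEM_MC_PREPARE_SSE_USAGE();
 *     IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *     IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *     IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */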
11359/** @note Not for IOPL or IF testing. */
11360#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11361/** @note Not for IOPL or IF testing. */
11362#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11363/** @note Not for IOPL or IF testing. */
11364#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11365/** @note Not for IOPL or IF testing. */
11366#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11367/** @note Not for IOPL or IF testing. */
11368#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11369 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11370 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11371/** @note Not for IOPL or IF testing. */
11372#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11373 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11374 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11375/** @note Not for IOPL or IF testing. */
11376#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11377 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11378 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11379 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11380/** @note Not for IOPL or IF testing. */
11381#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11382 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11383 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11384 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11385#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11386#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11387#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11388/** @note Not for IOPL or IF testing. */
11389#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11390 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11391 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11392/** @note Not for IOPL or IF testing. */
11393#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11394 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11395 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11396/** @note Not for IOPL or IF testing. */
11397#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11398 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11399 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11400/** @note Not for IOPL or IF testing. */
11401#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11402 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11403 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11404/** @note Not for IOPL or IF testing. */
11405#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11406 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11407 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11408/** @note Not for IOPL or IF testing. */
11409#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11410 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11411 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11412#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11413#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11414
11415#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11416 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11417#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11418 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11419#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11420 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11421#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11422 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11423#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11424 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11425#define IEM_MC_IF_FCW_IM() \
11426 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11427
11428#define IEM_MC_ELSE() } else {
11429#define IEM_MC_ENDIF() } do {} while (0)
11430
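/* The IEM_MC_IF_* macros open a block that IEM_MC_ELSE() and IEM_MC_ENDIF()
 * continue and close, so conditional microcode reads as straight-line code.
 * A CMOVZ-like sketch (u32Tmp, iGRegDst and iGRegSrc are assumed locals and
 * decoded register indices):
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_FETCH_GREG_U32(u32Tmp, iGRegSrc);
 *         IEM_MC_STORE_GREG_U32(iGRegDst, u32Tmp);
 *     IEM_MC_ENDIF();
 *     IEM_MC_ADVANCE_RIP();
 */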
11431/** @} */
11432
11433
11434/** @name Opcode Debug Helpers.
11435 * @{
11436 */
11437#ifdef VBOX_WITH_STATISTICS
11438# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11439#else
11440# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11441#endif
11442
11443#ifdef DEBUG
11444# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11445 do { \
11446 IEMOP_INC_STATS(a_Stats); \
11447 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11448 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11449 } while (0)
11450#else
11451# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11452#endif
11453
11454/** @} */
11455
11456
11457/** @name Opcode Helpers.
11458 * @{
11459 */
11460
11461#ifdef IN_RING3
11462# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11463 do { \
11464 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11465 else \
11466 { \
11467 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11468 return IEMOP_RAISE_INVALID_OPCODE(); \
11469 } \
11470 } while (0)
11471#else
11472# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11473 do { \
11474 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11475 else return IEMOP_RAISE_INVALID_OPCODE(); \
11476 } while (0)
11477#endif
11478
11479/** The instruction requires a 186 or later. */
11480#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11481# define IEMOP_HLP_MIN_186() do { } while (0)
11482#else
11483# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11484#endif
11485
11486/** The instruction requires a 286 or later. */
11487#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11488# define IEMOP_HLP_MIN_286() do { } while (0)
11489#else
11490# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11491#endif
11492
11493/** The instruction requires a 386 or later. */
11494#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11495# define IEMOP_HLP_MIN_386() do { } while (0)
11496#else
11497# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11498#endif
11499
11500/** The instruction requires a 386 or later if the given expression is true. */
11501#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11502# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11503#else
11504# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11505#endif
11506
11507/** The instruction requires a 486 or later. */
11508#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11509# define IEMOP_HLP_MIN_486() do { } while (0)
11510#else
11511# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11512#endif
11513
11514/** The instruction requires a Pentium (586) or later. */
11515#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11516# define IEMOP_HLP_MIN_586() do { } while (0)
11517#else
11518# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11519#endif
11520
11521/** The instruction requires a PentiumPro (686) or later. */
11522#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11523# define IEMOP_HLP_MIN_686() do { } while (0)
11524#else
11525# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11526#endif
11527
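/* These guards sit at the very top of an opcode decoder.  A sketch for a
 * 386-or-later instruction (the stats name and mnemonic are illustrative):
 *
 *     IEMOP_MNEMONIC(movzx_Gv_Eb, "movzx Gv,Eb");
 *     IEMOP_HLP_MIN_386();
 *     ... decode ModR/M and emit the IEM_MC block ...
 */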
11528
11529/** The instruction raises an \#UD in real and V8086 mode. */
11530#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11531 do \
11532 { \
11533 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11534 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11535 } while (0)
11536
11537/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11538 * 64-bit mode. */
11539#define IEMOP_HLP_NO_64BIT() \
11540 do \
11541 { \
11542 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11543 return IEMOP_RAISE_INVALID_OPCODE(); \
11544 } while (0)
11545
11546/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11547 * 64-bit mode. */
11548#define IEMOP_HLP_ONLY_64BIT() \
11549 do \
11550 { \
11551 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11552 return IEMOP_RAISE_INVALID_OPCODE(); \
11553 } while (0)
11554
11555/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11556#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11557 do \
11558 { \
11559 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11560 iemRecalEffOpSize64Default(pVCpu); \
11561 } while (0)
11562
11563/** The instruction has 64-bit operand size if 64-bit mode. */
11564#define IEMOP_HLP_64BIT_OP_SIZE() \
11565 do \
11566 { \
11567 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11568 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11569 } while (0)
11570
11571/** Only a REX prefix immediately preceding the first opcode byte takes
11572 * effect. This macro helps ensure this as well as log bad guest code. */
11573#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11574 do \
11575 { \
11576 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11577 { \
11578 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11579 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11580 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11581 pVCpu->iem.s.uRexB = 0; \
11582 pVCpu->iem.s.uRexIndex = 0; \
11583 pVCpu->iem.s.uRexReg = 0; \
11584 iemRecalEffOpSize(pVCpu); \
11585 } \
11586 } while (0)
11587
11588/**
11589 * Done decoding.
11590 */
11591#define IEMOP_HLP_DONE_DECODING() \
11592 do \
11593 { \
11594 /*nothing for now, maybe later... */ \
11595 } while (0)
11596
11597/**
11598 * Done decoding, raise \#UD exception if lock prefix present.
11599 */
11600#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11601 do \
11602 { \
11603 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11604 { /* likely */ } \
11605 else \
11606 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11607 } while (0)
11608#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11609 do \
11610 { \
11611 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11612 { /* likely */ } \
11613 else \
11614 { \
11615 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11616 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11617 } \
11618 } while (0)
11619#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11620 do \
11621 { \
11622 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11623 { /* likely */ } \
11624 else \
11625 { \
11626 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11627 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11628 } \
11629 } while (0)
11630
11631/**
11632 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11633 * are present.
11634 */
11635#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11636 do \
11637 { \
11638 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11639 { /* likely */ } \
11640 else \
11641 return IEMOP_RAISE_INVALID_OPCODE(); \
11642 } while (0)
11643
11644
11645/**
11646 * Calculates the effective address of a ModR/M memory operand.
11647 *
11648 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11649 *
11650 * @return Strict VBox status code.
11651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11652 * @param bRm The ModRM byte.
11653 * @param cbImm The size of any immediate following the
11654 * effective address opcode bytes. Important for
11655 * RIP relative addressing.
11656 * @param pGCPtrEff Where to return the effective address.
11657 */
11658IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11659{
11660 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11661 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11662# define SET_SS_DEF() \
11663 do \
11664 { \
11665 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11666 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11667 } while (0)
11668
11669 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11670 {
11671/** @todo Check the effective address size crap! */
11672 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11673 {
11674 uint16_t u16EffAddr;
11675
11676 /* Handle the disp16 form with no registers first. */
11677 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11678 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11679 else
11680 {
11681 /* Get the displacement. */
11682 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11683 {
11684 case 0: u16EffAddr = 0; break;
11685 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11686 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11687 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11688 }
11689
11690 /* Add the base and index registers to the disp. */
11691 switch (bRm & X86_MODRM_RM_MASK)
11692 {
11693 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11694 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11695 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11696 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11697 case 4: u16EffAddr += pCtx->si; break;
11698 case 5: u16EffAddr += pCtx->di; break;
11699 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11700 case 7: u16EffAddr += pCtx->bx; break;
11701 }
11702 }
11703
11704 *pGCPtrEff = u16EffAddr;
11705 }
11706 else
11707 {
11708 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11709 uint32_t u32EffAddr;
11710
11711 /* Handle the disp32 form with no registers first. */
11712 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11713 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11714 else
11715 {
11716 /* Get the register (or SIB) value. */
11717 switch ((bRm & X86_MODRM_RM_MASK))
11718 {
11719 case 0: u32EffAddr = pCtx->eax; break;
11720 case 1: u32EffAddr = pCtx->ecx; break;
11721 case 2: u32EffAddr = pCtx->edx; break;
11722 case 3: u32EffAddr = pCtx->ebx; break;
11723 case 4: /* SIB */
11724 {
11725 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11726
11727 /* Get the index and scale it. */
11728 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11729 {
11730 case 0: u32EffAddr = pCtx->eax; break;
11731 case 1: u32EffAddr = pCtx->ecx; break;
11732 case 2: u32EffAddr = pCtx->edx; break;
11733 case 3: u32EffAddr = pCtx->ebx; break;
11734 case 4: u32EffAddr = 0; /*none */ break;
11735 case 5: u32EffAddr = pCtx->ebp; break;
11736 case 6: u32EffAddr = pCtx->esi; break;
11737 case 7: u32EffAddr = pCtx->edi; break;
11738 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11739 }
11740 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11741
11742 /* add base */
11743 switch (bSib & X86_SIB_BASE_MASK)
11744 {
11745 case 0: u32EffAddr += pCtx->eax; break;
11746 case 1: u32EffAddr += pCtx->ecx; break;
11747 case 2: u32EffAddr += pCtx->edx; break;
11748 case 3: u32EffAddr += pCtx->ebx; break;
11749 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11750 case 5:
11751 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11752 {
11753 u32EffAddr += pCtx->ebp;
11754 SET_SS_DEF();
11755 }
11756 else
11757 {
11758 uint32_t u32Disp;
11759 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11760 u32EffAddr += u32Disp;
11761 }
11762 break;
11763 case 6: u32EffAddr += pCtx->esi; break;
11764 case 7: u32EffAddr += pCtx->edi; break;
11765 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11766 }
11767 break;
11768 }
11769 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11770 case 6: u32EffAddr = pCtx->esi; break;
11771 case 7: u32EffAddr = pCtx->edi; break;
11772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11773 }
11774
11775 /* Get and add the displacement. */
11776 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11777 {
11778 case 0:
11779 break;
11780 case 1:
11781 {
11782 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11783 u32EffAddr += i8Disp;
11784 break;
11785 }
11786 case 2:
11787 {
11788 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11789 u32EffAddr += u32Disp;
11790 break;
11791 }
11792 default:
11793 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11794 }
11795
11796 }
11797 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11798 *pGCPtrEff = u32EffAddr;
11799 else
11800 {
11801 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11802 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11803 }
11804 }
11805 }
11806 else
11807 {
11808 uint64_t u64EffAddr;
11809
11810 /* Handle the rip+disp32 form with no registers first. */
11811 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11812 {
11813 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11814 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11815 }
11816 else
11817 {
11818 /* Get the register (or SIB) value. */
11819 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11820 {
11821 case 0: u64EffAddr = pCtx->rax; break;
11822 case 1: u64EffAddr = pCtx->rcx; break;
11823 case 2: u64EffAddr = pCtx->rdx; break;
11824 case 3: u64EffAddr = pCtx->rbx; break;
11825 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11826 case 6: u64EffAddr = pCtx->rsi; break;
11827 case 7: u64EffAddr = pCtx->rdi; break;
11828 case 8: u64EffAddr = pCtx->r8; break;
11829 case 9: u64EffAddr = pCtx->r9; break;
11830 case 10: u64EffAddr = pCtx->r10; break;
11831 case 11: u64EffAddr = pCtx->r11; break;
11832 case 13: u64EffAddr = pCtx->r13; break;
11833 case 14: u64EffAddr = pCtx->r14; break;
11834 case 15: u64EffAddr = pCtx->r15; break;
11835 /* SIB */
11836 case 4:
11837 case 12:
11838 {
11839 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11840
11841 /* Get the index and scale it. */
11842 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11843 {
11844 case 0: u64EffAddr = pCtx->rax; break;
11845 case 1: u64EffAddr = pCtx->rcx; break;
11846 case 2: u64EffAddr = pCtx->rdx; break;
11847 case 3: u64EffAddr = pCtx->rbx; break;
11848 case 4: u64EffAddr = 0; /*none */ break;
11849 case 5: u64EffAddr = pCtx->rbp; break;
11850 case 6: u64EffAddr = pCtx->rsi; break;
11851 case 7: u64EffAddr = pCtx->rdi; break;
11852 case 8: u64EffAddr = pCtx->r8; break;
11853 case 9: u64EffAddr = pCtx->r9; break;
11854 case 10: u64EffAddr = pCtx->r10; break;
11855 case 11: u64EffAddr = pCtx->r11; break;
11856 case 12: u64EffAddr = pCtx->r12; break;
11857 case 13: u64EffAddr = pCtx->r13; break;
11858 case 14: u64EffAddr = pCtx->r14; break;
11859 case 15: u64EffAddr = pCtx->r15; break;
11860 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11861 }
11862 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11863
11864 /* add base */
11865 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11866 {
11867 case 0: u64EffAddr += pCtx->rax; break;
11868 case 1: u64EffAddr += pCtx->rcx; break;
11869 case 2: u64EffAddr += pCtx->rdx; break;
11870 case 3: u64EffAddr += pCtx->rbx; break;
11871 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11872 case 6: u64EffAddr += pCtx->rsi; break;
11873 case 7: u64EffAddr += pCtx->rdi; break;
11874 case 8: u64EffAddr += pCtx->r8; break;
11875 case 9: u64EffAddr += pCtx->r9; break;
11876 case 10: u64EffAddr += pCtx->r10; break;
11877 case 11: u64EffAddr += pCtx->r11; break;
11878 case 12: u64EffAddr += pCtx->r12; break;
11879 case 14: u64EffAddr += pCtx->r14; break;
11880 case 15: u64EffAddr += pCtx->r15; break;
11881 /* complicated encodings */
11882 case 5:
11883 case 13:
11884 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11885 {
11886 if (!pVCpu->iem.s.uRexB)
11887 {
11888 u64EffAddr += pCtx->rbp;
11889 SET_SS_DEF();
11890 }
11891 else
11892 u64EffAddr += pCtx->r13;
11893 }
11894 else
11895 {
11896 uint32_t u32Disp;
11897 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11898 u64EffAddr += (int32_t)u32Disp;
11899 }
11900 break;
11901 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11902 }
11903 break;
11904 }
11905 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11906 }
11907
11908 /* Get and add the displacement. */
11909 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11910 {
11911 case 0:
11912 break;
11913 case 1:
11914 {
11915 int8_t i8Disp;
11916 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11917 u64EffAddr += i8Disp;
11918 break;
11919 }
11920 case 2:
11921 {
11922 uint32_t u32Disp;
11923 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11924 u64EffAddr += (int32_t)u32Disp;
11925 break;
11926 }
11927 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11928 }
11929
11930 }
11931
11932 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11933 *pGCPtrEff = u64EffAddr;
11934 else
11935 {
11936 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11937 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11938 }
11939 }
11940
11941 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11942 return VINF_SUCCESS;
11943}
11944
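/* Worked 16-bit example for the function above: bRm=0x42 decodes as mod=1,
 * reg=0, r/m=2, i.e. [bp+si+disp8].  With a disp8 of 0x10 the code yields
 * GCPtrEff = BP + SI + 0x10, and because BP is involved SET_SS_DEF() makes
 * SS the default segment unless a segment prefix overrides it.
 */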
11945
11946/**
11947 * Calculates the effective address of a ModR/M memory operand.
11948 *
11949 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11950 *
11951 * @return Strict VBox status code.
11952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11953 * @param bRm The ModRM byte.
11954 * @param cbImm The size of any immediate following the
11955 * effective address opcode bytes. Important for
11956 * RIP relative addressing.
11957 * @param pGCPtrEff Where to return the effective address.
11958 * @param offRsp RSP displacement.
11959 */
11960IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11961{
11962 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11963 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11964# define SET_SS_DEF() \
11965 do \
11966 { \
11967 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11968 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11969 } while (0)
11970
11971 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11972 {
11973/** @todo Check the effective address size crap! */
11974 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11975 {
11976 uint16_t u16EffAddr;
11977
11978 /* Handle the disp16 form with no registers first. */
11979 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11980 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11981 else
11982 {
11983 /* Get the displacement. */
11984 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11985 {
11986 case 0: u16EffAddr = 0; break;
11987 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11988 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11989 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11990 }
11991
11992 /* Add the base and index registers to the disp. */
11993 switch (bRm & X86_MODRM_RM_MASK)
11994 {
11995 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11996 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11997 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11998 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11999 case 4: u16EffAddr += pCtx->si; break;
12000 case 5: u16EffAddr += pCtx->di; break;
12001 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12002 case 7: u16EffAddr += pCtx->bx; break;
12003 }
12004 }
12005
12006 *pGCPtrEff = u16EffAddr;
12007 }
12008 else
12009 {
12010 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12011 uint32_t u32EffAddr;
12012
12013 /* Handle the disp32 form with no registers first. */
12014 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12015 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12016 else
12017 {
12018 /* Get the register (or SIB) value. */
12019 switch ((bRm & X86_MODRM_RM_MASK))
12020 {
12021 case 0: u32EffAddr = pCtx->eax; break;
12022 case 1: u32EffAddr = pCtx->ecx; break;
12023 case 2: u32EffAddr = pCtx->edx; break;
12024 case 3: u32EffAddr = pCtx->ebx; break;
12025 case 4: /* SIB */
12026 {
12027 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12028
12029 /* Get the index and scale it. */
12030 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12031 {
12032 case 0: u32EffAddr = pCtx->eax; break;
12033 case 1: u32EffAddr = pCtx->ecx; break;
12034 case 2: u32EffAddr = pCtx->edx; break;
12035 case 3: u32EffAddr = pCtx->ebx; break;
12036 case 4: u32EffAddr = 0; /*none */ break;
12037 case 5: u32EffAddr = pCtx->ebp; break;
12038 case 6: u32EffAddr = pCtx->esi; break;
12039 case 7: u32EffAddr = pCtx->edi; break;
12040 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12041 }
12042 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12043
12044 /* add base */
12045 switch (bSib & X86_SIB_BASE_MASK)
12046 {
12047 case 0: u32EffAddr += pCtx->eax; break;
12048 case 1: u32EffAddr += pCtx->ecx; break;
12049 case 2: u32EffAddr += pCtx->edx; break;
12050 case 3: u32EffAddr += pCtx->ebx; break;
12051 case 4:
12052 u32EffAddr += pCtx->esp + offRsp;
12053 SET_SS_DEF();
12054 break;
12055 case 5:
12056 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12057 {
12058 u32EffAddr += pCtx->ebp;
12059 SET_SS_DEF();
12060 }
12061 else
12062 {
12063 uint32_t u32Disp;
12064 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12065 u32EffAddr += u32Disp;
12066 }
12067 break;
12068 case 6: u32EffAddr += pCtx->esi; break;
12069 case 7: u32EffAddr += pCtx->edi; break;
12070 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12071 }
12072 break;
12073 }
12074 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12075 case 6: u32EffAddr = pCtx->esi; break;
12076 case 7: u32EffAddr = pCtx->edi; break;
12077 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12078 }
12079
12080 /* Get and add the displacement. */
12081 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12082 {
12083 case 0:
12084 break;
12085 case 1:
12086 {
12087 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12088 u32EffAddr += i8Disp;
12089 break;
12090 }
12091 case 2:
12092 {
12093 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12094 u32EffAddr += u32Disp;
12095 break;
12096 }
12097 default:
12098 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12099 }
12100
12101 }
12102 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12103 *pGCPtrEff = u32EffAddr;
12104 else
12105 {
12106 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12107 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12108 }
12109 }
12110 }
12111 else
12112 {
12113 uint64_t u64EffAddr;
12114
12115 /* Handle the rip+disp32 form with no registers first. */
12116 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12117 {
12118 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12119 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12120 }
12121 else
12122 {
12123 /* Get the register (or SIB) value. */
12124 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12125 {
12126 case 0: u64EffAddr = pCtx->rax; break;
12127 case 1: u64EffAddr = pCtx->rcx; break;
12128 case 2: u64EffAddr = pCtx->rdx; break;
12129 case 3: u64EffAddr = pCtx->rbx; break;
12130 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12131 case 6: u64EffAddr = pCtx->rsi; break;
12132 case 7: u64EffAddr = pCtx->rdi; break;
12133 case 8: u64EffAddr = pCtx->r8; break;
12134 case 9: u64EffAddr = pCtx->r9; break;
12135 case 10: u64EffAddr = pCtx->r10; break;
12136 case 11: u64EffAddr = pCtx->r11; break;
12137 case 13: u64EffAddr = pCtx->r13; break;
12138 case 14: u64EffAddr = pCtx->r14; break;
12139 case 15: u64EffAddr = pCtx->r15; break;
12140 /* SIB */
12141 case 4:
12142 case 12:
12143 {
12144 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12145
12146 /* Get the index and scale it. */
12147 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12148 {
12149 case 0: u64EffAddr = pCtx->rax; break;
12150 case 1: u64EffAddr = pCtx->rcx; break;
12151 case 2: u64EffAddr = pCtx->rdx; break;
12152 case 3: u64EffAddr = pCtx->rbx; break;
12153 case 4: u64EffAddr = 0; /*none */ break;
12154 case 5: u64EffAddr = pCtx->rbp; break;
12155 case 6: u64EffAddr = pCtx->rsi; break;
12156 case 7: u64EffAddr = pCtx->rdi; break;
12157 case 8: u64EffAddr = pCtx->r8; break;
12158 case 9: u64EffAddr = pCtx->r9; break;
12159 case 10: u64EffAddr = pCtx->r10; break;
12160 case 11: u64EffAddr = pCtx->r11; break;
12161 case 12: u64EffAddr = pCtx->r12; break;
12162 case 13: u64EffAddr = pCtx->r13; break;
12163 case 14: u64EffAddr = pCtx->r14; break;
12164 case 15: u64EffAddr = pCtx->r15; break;
12165 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12166 }
12167 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12168
12169 /* add base */
12170 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12171 {
12172 case 0: u64EffAddr += pCtx->rax; break;
12173 case 1: u64EffAddr += pCtx->rcx; break;
12174 case 2: u64EffAddr += pCtx->rdx; break;
12175 case 3: u64EffAddr += pCtx->rbx; break;
12176 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12177 case 6: u64EffAddr += pCtx->rsi; break;
12178 case 7: u64EffAddr += pCtx->rdi; break;
12179 case 8: u64EffAddr += pCtx->r8; break;
12180 case 9: u64EffAddr += pCtx->r9; break;
12181 case 10: u64EffAddr += pCtx->r10; break;
12182 case 11: u64EffAddr += pCtx->r11; break;
12183 case 12: u64EffAddr += pCtx->r12; break;
12184 case 14: u64EffAddr += pCtx->r14; break;
12185 case 15: u64EffAddr += pCtx->r15; break;
12186 /* complicated encodings */
12187 case 5:
12188 case 13:
12189 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12190 {
12191 if (!pVCpu->iem.s.uRexB)
12192 {
12193 u64EffAddr += pCtx->rbp;
12194 SET_SS_DEF();
12195 }
12196 else
12197 u64EffAddr += pCtx->r13;
12198 }
12199 else
12200 {
12201 uint32_t u32Disp;
12202 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12203 u64EffAddr += (int32_t)u32Disp;
12204 }
12205 break;
12206 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12207 }
12208 break;
12209 }
12210 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12211 }
12212
12213 /* Get and add the displacement. */
12214 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12215 {
12216 case 0:
12217 break;
12218 case 1:
12219 {
12220 int8_t i8Disp;
12221 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12222 u64EffAddr += i8Disp;
12223 break;
12224 }
12225 case 2:
12226 {
12227 uint32_t u32Disp;
12228 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12229 u64EffAddr += (int32_t)u32Disp;
12230 break;
12231 }
12232 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12233 }
12234
12235 }
12236
12237 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12238 *pGCPtrEff = u64EffAddr;
12239 else
12240 {
12241 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12242 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12243 }
12244 }
12245
12246 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12247 return VINF_SUCCESS;
12248}
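
/*
 * Illustrative sketch of how the helper above is typically used for a memory operand.
 * This is a hypothetical caller shown for orientation only (and therefore disabled);
 * real decoder functions normally go through the IEM_MC_* macro layer rather than
 * calling the helper directly.
 */
#if 0
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) != 3) /* memory operand, not a register */
    {
        RTGCPTR      GCPtrEff;
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0 /* cbImm: no immediate follows */, &GCPtrEff, 0 /* offRsp */);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        /* ... access memory at pVCpu->iem.s.iEffSeg : GCPtrEff ... */
    }
#endif
/* Note that cbImm must give the number of immediate bytes still to come after the
   addressing bytes, so RIP relative operands end up relative to the end of the whole
   instruction. */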
12249
12250
12251#ifdef IEM_WITH_SETJMP
12252/**
12253 * Calculates the effective address of a ModR/M memory operand.
12254 *
12255 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12256 *
12257 * May longjmp on internal error.
12258 *
12259 * @return The effective address.
12260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12261 * @param bRm The ModRM byte.
12262 * @param cbImm The size of any immediate following the
12263 * effective address opcode bytes. Important for
12264 * RIP relative addressing.
12265 */
12266IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12267{
12268 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12269 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12270# define SET_SS_DEF() \
12271 do \
12272 { \
12273 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12274 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12275 } while (0)
12276
12277 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12278 {
12279/** @todo Check the effective address size crap! */
12280 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12281 {
12282 uint16_t u16EffAddr;
12283
12284 /* Handle the disp16 form with no registers first. */
12285 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12286 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12287 else
12288 {
12289 /* Get the displacement. */
12290 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12291 {
12292 case 0: u16EffAddr = 0; break;
12293 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12294 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12295 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12296 }
12297
12298 /* Add the base and index registers to the disp. */
12299 switch (bRm & X86_MODRM_RM_MASK)
12300 {
12301 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12302 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12303 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12304 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12305 case 4: u16EffAddr += pCtx->si; break;
12306 case 5: u16EffAddr += pCtx->di; break;
12307 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12308 case 7: u16EffAddr += pCtx->bx; break;
12309 }
12310 }
12311
12312 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12313 return u16EffAddr;
12314 }
12315
12316 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12317 uint32_t u32EffAddr;
12318
12319 /* Handle the disp32 form with no registers first. */
12320 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12321 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12322 else
12323 {
12324 /* Get the register (or SIB) value. */
12325 switch ((bRm & X86_MODRM_RM_MASK))
12326 {
12327 case 0: u32EffAddr = pCtx->eax; break;
12328 case 1: u32EffAddr = pCtx->ecx; break;
12329 case 2: u32EffAddr = pCtx->edx; break;
12330 case 3: u32EffAddr = pCtx->ebx; break;
12331 case 4: /* SIB */
12332 {
12333 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12334
12335 /* Get the index and scale it. */
12336 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12337 {
12338 case 0: u32EffAddr = pCtx->eax; break;
12339 case 1: u32EffAddr = pCtx->ecx; break;
12340 case 2: u32EffAddr = pCtx->edx; break;
12341 case 3: u32EffAddr = pCtx->ebx; break;
12342 case 4: u32EffAddr = 0; /*none */ break;
12343 case 5: u32EffAddr = pCtx->ebp; break;
12344 case 6: u32EffAddr = pCtx->esi; break;
12345 case 7: u32EffAddr = pCtx->edi; break;
12346 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12347 }
12348 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12349
12350 /* add base */
12351 switch (bSib & X86_SIB_BASE_MASK)
12352 {
12353 case 0: u32EffAddr += pCtx->eax; break;
12354 case 1: u32EffAddr += pCtx->ecx; break;
12355 case 2: u32EffAddr += pCtx->edx; break;
12356 case 3: u32EffAddr += pCtx->ebx; break;
12357 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12358 case 5:
12359 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12360 {
12361 u32EffAddr += pCtx->ebp;
12362 SET_SS_DEF();
12363 }
12364 else
12365 {
12366 uint32_t u32Disp;
12367 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12368 u32EffAddr += u32Disp;
12369 }
12370 break;
12371 case 6: u32EffAddr += pCtx->esi; break;
12372 case 7: u32EffAddr += pCtx->edi; break;
12373 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12374 }
12375 break;
12376 }
12377 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12378 case 6: u32EffAddr = pCtx->esi; break;
12379 case 7: u32EffAddr = pCtx->edi; break;
12380 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12381 }
12382
12383 /* Get and add the displacement. */
12384 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12385 {
12386 case 0:
12387 break;
12388 case 1:
12389 {
12390 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12391 u32EffAddr += i8Disp;
12392 break;
12393 }
12394 case 2:
12395 {
12396 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12397 u32EffAddr += u32Disp;
12398 break;
12399 }
12400 default:
12401 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12402 }
12403 }
12404
12405 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12406 {
12407 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12408 return u32EffAddr;
12409 }
12410 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12411 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12412 return u32EffAddr & UINT16_MAX;
12413 }
12414
12415 uint64_t u64EffAddr;
12416
12417 /* Handle the rip+disp32 form with no registers first. */
12418 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12419 {
12420 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12421 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12422 }
12423 else
12424 {
12425 /* Get the register (or SIB) value. */
12426 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12427 {
12428 case 0: u64EffAddr = pCtx->rax; break;
12429 case 1: u64EffAddr = pCtx->rcx; break;
12430 case 2: u64EffAddr = pCtx->rdx; break;
12431 case 3: u64EffAddr = pCtx->rbx; break;
12432 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12433 case 6: u64EffAddr = pCtx->rsi; break;
12434 case 7: u64EffAddr = pCtx->rdi; break;
12435 case 8: u64EffAddr = pCtx->r8; break;
12436 case 9: u64EffAddr = pCtx->r9; break;
12437 case 10: u64EffAddr = pCtx->r10; break;
12438 case 11: u64EffAddr = pCtx->r11; break;
12439 case 13: u64EffAddr = pCtx->r13; break;
12440 case 14: u64EffAddr = pCtx->r14; break;
12441 case 15: u64EffAddr = pCtx->r15; break;
12442 /* SIB */
12443 case 4:
12444 case 12:
12445 {
12446 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12447
12448 /* Get the index and scale it. */
12449 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12450 {
12451 case 0: u64EffAddr = pCtx->rax; break;
12452 case 1: u64EffAddr = pCtx->rcx; break;
12453 case 2: u64EffAddr = pCtx->rdx; break;
12454 case 3: u64EffAddr = pCtx->rbx; break;
12455 case 4: u64EffAddr = 0; /*none */ break;
12456 case 5: u64EffAddr = pCtx->rbp; break;
12457 case 6: u64EffAddr = pCtx->rsi; break;
12458 case 7: u64EffAddr = pCtx->rdi; break;
12459 case 8: u64EffAddr = pCtx->r8; break;
12460 case 9: u64EffAddr = pCtx->r9; break;
12461 case 10: u64EffAddr = pCtx->r10; break;
12462 case 11: u64EffAddr = pCtx->r11; break;
12463 case 12: u64EffAddr = pCtx->r12; break;
12464 case 13: u64EffAddr = pCtx->r13; break;
12465 case 14: u64EffAddr = pCtx->r14; break;
12466 case 15: u64EffAddr = pCtx->r15; break;
12467 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12468 }
12469 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12470
12471 /* add base */
12472 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12473 {
12474 case 0: u64EffAddr += pCtx->rax; break;
12475 case 1: u64EffAddr += pCtx->rcx; break;
12476 case 2: u64EffAddr += pCtx->rdx; break;
12477 case 3: u64EffAddr += pCtx->rbx; break;
12478 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12479 case 6: u64EffAddr += pCtx->rsi; break;
12480 case 7: u64EffAddr += pCtx->rdi; break;
12481 case 8: u64EffAddr += pCtx->r8; break;
12482 case 9: u64EffAddr += pCtx->r9; break;
12483 case 10: u64EffAddr += pCtx->r10; break;
12484 case 11: u64EffAddr += pCtx->r11; break;
12485 case 12: u64EffAddr += pCtx->r12; break;
12486 case 14: u64EffAddr += pCtx->r14; break;
12487 case 15: u64EffAddr += pCtx->r15; break;
12488 /* complicated encodings */
12489 case 5:
12490 case 13:
12491 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12492 {
12493 if (!pVCpu->iem.s.uRexB)
12494 {
12495 u64EffAddr += pCtx->rbp;
12496 SET_SS_DEF();
12497 }
12498 else
12499 u64EffAddr += pCtx->r13;
12500 }
12501 else
12502 {
12503 uint32_t u32Disp;
12504 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12505 u64EffAddr += (int32_t)u32Disp;
12506 }
12507 break;
12508 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12509 }
12510 break;
12511 }
12512 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12513 }
12514
12515 /* Get and add the displacement. */
12516 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12517 {
12518 case 0:
12519 break;
12520 case 1:
12521 {
12522 int8_t i8Disp;
12523 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12524 u64EffAddr += i8Disp;
12525 break;
12526 }
12527 case 2:
12528 {
12529 uint32_t u32Disp;
12530 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12531 u64EffAddr += (int32_t)u32Disp;
12532 break;
12533 }
12534 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12535 }
12536
12537 }
12538
12539 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12540 {
12541 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12542 return u64EffAddr;
12543 }
12544 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12545 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12546 return u64EffAddr & UINT32_MAX;
12547}
12548#endif /* IEM_WITH_SETJMP */
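
/*
 * Note: the IEM_WITH_SETJMP variant above performs the same ModR/M + SIB address
 * calculation as iemOpHlpCalcRmEffAddrEx() minus the offRsp adjustment; the difference
 * is in error reporting.  The strict variant returns a status code and the address via
 * an out parameter, while the jump variant returns the address directly and reports
 * internal decoder errors by longjmp-ing through pVCpu->iem.s.CTX_SUFF(pJmpBuf) (it is
 * meant to be reached via IEM_MC_CALC_RM_EFF_ADDR).
 */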
12549
12550
12551/** @} */
12552
12553
12554
12555/*
12556 * Include the instructions
12557 */
12558#include "IEMAllInstructions.cpp.h"
12559
12560
12561
12562
12563#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12564
12565/**
12566 * Sets up execution verification mode.
12567 */
12568IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12569{
12570
12571 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12572
12573 /*
12574 * Always note down the address of the current instruction.
12575 */
12576 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12577 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12578
12579 /*
12580 * Enable verification and/or logging.
12581 */
12582 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12583 if ( fNewNoRem
12584 && ( 0
12585#if 0 /* auto enable on first paged protected mode interrupt */
12586 || ( pOrgCtx->eflags.Bits.u1IF
12587 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12588 && TRPMHasTrap(pVCpu)
12589 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12590#endif
12591#if 0
12592 || ( pOrgCtx->cs.Sel == 0x10
12593 && ( pOrgCtx->rip == 0x90119e3e
12594 || pOrgCtx->rip == 0x901d9810) )
12595#endif
12596#if 0 /* Auto enable DSL - FPU stuff. */
12597 || ( pOrgCtx->cs.Sel == 0x10
12598 && (// pOrgCtx->rip == 0xc02ec07f
12599 //|| pOrgCtx->rip == 0xc02ec082
12600 //|| pOrgCtx->rip == 0xc02ec0c9
12601 0
12602 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12603#endif
12604#if 0 /* Auto enable DSL - fstp st0 stuff. */
12605 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12606#endif
12607#if 0
12608 || pOrgCtx->rip == 0x9022bb3a
12609#endif
12610#if 0
12611 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12612#endif
12613#if 0
12614 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12615 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12616#endif
12617#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
12618 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12619 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12620 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12621#endif
12622#if 0 /* NT4SP1 - xadd early boot. */
12623 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12624#endif
12625#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12626 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12627#endif
12628#if 0 /* NT4SP1 - cmpxchg (AMD). */
12629 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12630#endif
12631#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12632 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12633#endif
12634#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12635 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12636
12637#endif
12638#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12639 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12640
12641#endif
12642#if 0 /* NT4SP1 - frstor [ecx] */
12643 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12644#endif
12645#if 0 /* xxxxxx - All long mode code. */
12646 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12647#endif
12648#if 0 /* rep movsq linux 3.7 64-bit boot. */
12649 || (pOrgCtx->rip == 0x0000000000100241)
12650#endif
12651#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12652 || (pOrgCtx->rip == 0x000000000215e240)
12653#endif
12654#if 0 /* DOS's size-overridden iret to v8086. */
12655 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12656#endif
12657 )
12658 )
12659 {
12660 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12661 RTLogFlags(NULL, "enabled");
12662 fNewNoRem = false;
12663 }
12664 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12665 {
12666 pVCpu->iem.s.fNoRem = fNewNoRem;
12667 if (!fNewNoRem)
12668 {
12669 LogAlways(("Enabling verification mode!\n"));
12670 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12671 }
12672 else
12673 LogAlways(("Disabling verification mode!\n"));
12674 }
12675
12676 /*
12677 * Switch state.
12678 */
12679 if (IEM_VERIFICATION_ENABLED(pVCpu))
12680 {
12681 static CPUMCTX s_DebugCtx; /* Ugly! */
12682
12683 s_DebugCtx = *pOrgCtx;
12684 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12685 }
12686
12687 /*
12688 * See if there is an interrupt pending in TRPM and inject it if we can.
12689 */
12690 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12691 if ( pOrgCtx->eflags.Bits.u1IF
12692 && TRPMHasTrap(pVCpu)
12693 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12694 {
12695 uint8_t u8TrapNo;
12696 TRPMEVENT enmType;
12697 RTGCUINT uErrCode;
12698 RTGCPTR uCr2;
12699 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12700 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12701 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12702 TRPMResetTrap(pVCpu);
12703 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12704 }
12705
12706 /*
12707 * Reset the counters.
12708 */
12709 pVCpu->iem.s.cIOReads = 0;
12710 pVCpu->iem.s.cIOWrites = 0;
12711 pVCpu->iem.s.fIgnoreRaxRdx = false;
12712 pVCpu->iem.s.fOverlappingMovs = false;
12713 pVCpu->iem.s.fProblematicMemory = false;
12714 pVCpu->iem.s.fUndefinedEFlags = 0;
12715
12716 if (IEM_VERIFICATION_ENABLED(pVCpu))
12717 {
12718 /*
12719 * Free all verification records.
12720 */
12721 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12722 pVCpu->iem.s.pIemEvtRecHead = NULL;
12723 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12724 do
12725 {
12726 while (pEvtRec)
12727 {
12728 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12729 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12730 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12731 pEvtRec = pNext;
12732 }
12733 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12734 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12735 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12736 } while (pEvtRec);
12737 }
12738}
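
/*
 * Note on the big '#if 0' trigger list above: the disabled conditions are ad-hoc
 * breakpoints that were used to auto-enable verification at specific guest cs:rip
 * values (NT4SP1, Linux boot, DOS, ...) by switching the log settings to
 * "iem.eo.l6.l2".  With all of them disabled, verification simply follows
 * LogIs6Enabled(): with level 6 logging active fNoRem ends up false and the
 * REM/HM comparison runs for every instruction.
 */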
12739
12740
12741/**
12742 * Allocate an event record.
12743 * @returns Pointer to a record.
12744 */
12745IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12746{
12747 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12748 return NULL;
12749
12750 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12751 if (pEvtRec)
12752 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12753 else
12754 {
12755 if (!pVCpu->iem.s.ppIemEvtRecNext)
12756 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12757
12758 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12759 if (!pEvtRec)
12760 return NULL;
12761 }
12762 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12763 pEvtRec->pNext = NULL;
12764 return pEvtRec;
12765}
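
/*
 * The allocator above recycles records through a simple free list (pFreeEvtRec) and only
 * falls back to the MM heap when that list is empty.  Returning NULL both when verification
 * is disabled and when a notification arrives too early lets all callers simply bail out.
 */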
12766
12767
12768/**
12769 * IOMMMIORead notification.
12770 */
12771VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12772{
12773 PVMCPU pVCpu = VMMGetCpu(pVM);
12774 if (!pVCpu)
12775 return;
12776 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12777 if (!pEvtRec)
12778 return;
12779 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12780 pEvtRec->u.RamRead.GCPhys = GCPhys;
12781 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12782 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12783 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12784}
12785
12786
12787/**
12788 * IOMMMIOWrite notification.
12789 */
12790VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12791{
12792 PVMCPU pVCpu = VMMGetCpu(pVM);
12793 if (!pVCpu)
12794 return;
12795 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12796 if (!pEvtRec)
12797 return;
12798 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12799 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12800 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12801 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12802 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12803 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12804 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12805 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12806 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12807}
12808
12809
12810/**
12811 * IOMIOPortRead notification.
12812 */
12813VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12814{
12815 PVMCPU pVCpu = VMMGetCpu(pVM);
12816 if (!pVCpu)
12817 return;
12818 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12819 if (!pEvtRec)
12820 return;
12821 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12822 pEvtRec->u.IOPortRead.Port = Port;
12823 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12824 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12825 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12826}
12827
12828/**
12829 * IOMIOPortWrite notification.
12830 */
12831VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12832{
12833 PVMCPU pVCpu = VMMGetCpu(pVM);
12834 if (!pVCpu)
12835 return;
12836 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12837 if (!pEvtRec)
12838 return;
12839 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12840 pEvtRec->u.IOPortWrite.Port = Port;
12841 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12842 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12843 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12844 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12845}
12846
12847
12848VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12849{
12850 PVMCPU pVCpu = VMMGetCpu(pVM);
12851 if (!pVCpu)
12852 return;
12853 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12854 if (!pEvtRec)
12855 return;
12856 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12857 pEvtRec->u.IOPortStrRead.Port = Port;
12858 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12859 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12860 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12861 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12862}
12863
12864
12865VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12866{
12867 PVMCPU pVCpu = VMMGetCpu(pVM);
12868 if (!pVCpu)
12869 return;
12870 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12871 if (!pEvtRec)
12872 return;
12873 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12874 pEvtRec->u.IOPortStrWrite.Port = Port;
12875 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12876 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12877 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12878 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12879}
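
/*
 * All the IEMNotify* hooks above follow the same pattern: resolve the calling VCPU,
 * allocate a record, fill in the event specific payload and append it to the "other"
 * event list (ppOtherEvtRecNext).  That list captures what the real device/memory code
 * observed and is later compared against IEM's own record list by
 * iemExecVerificationModeCheck().
 */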
12880
12881
12882/**
12883 * Fakes and records an I/O port read.
12884 *
12885 * @returns VINF_SUCCESS.
12886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12887 * @param Port The I/O port.
12888 * @param pu32Value Where to store the fake value.
12889 * @param cbValue The size of the access.
12890 */
12891IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12892{
12893 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12894 if (pEvtRec)
12895 {
12896 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12897 pEvtRec->u.IOPortRead.Port = Port;
12898 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12899 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12900 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12901 }
12902 pVCpu->iem.s.cIOReads++;
12903 *pu32Value = 0xcccccccc;
12904 return VINF_SUCCESS;
12905}
12906
12907
12908/**
12909 * Fakes and records an I/O port write.
12910 *
12911 * @returns VINF_SUCCESS.
12912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12913 * @param Port The I/O port.
12914 * @param u32Value The value being written.
12915 * @param cbValue The size of the access.
12916 */
12917IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12918{
12919 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12920 if (pEvtRec)
12921 {
12922 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12923 pEvtRec->u.IOPortWrite.Port = Port;
12924 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12925 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12926 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12927 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12928 }
12929 pVCpu->iem.s.cIOWrites++;
12930 return VINF_SUCCESS;
12931}
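
/*
 * The two fake I/O helpers above are what IEM uses instead of real port access while
 * verification is active: reads hand back the dummy pattern 0xcccccccc, writes go
 * nowhere, and both append a record to IEM's own event list (ppIemEvtRecNext) so the
 * access can still be matched against what REM/HM actually did.
 */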
12932
12933
12934/**
12935 * Adds the register state and disassembly of the previous and current instruction to the assertion info.
12936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12937 */
12938IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12939{
12940 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12941 PVM pVM = pVCpu->CTX_SUFF(pVM);
12942
12943 char szRegs[4096];
12944 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12945 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12946 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12947 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12948 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12949 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12950 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12951 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12952 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12953 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12954 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12955 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12956 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12957 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12958 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12959 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12960 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12961 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12962 " efer=%016VR{efer}\n"
12963 " pat=%016VR{pat}\n"
12964 " sf_mask=%016VR{sf_mask}\n"
12965 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12966 " lstar=%016VR{lstar}\n"
12967 " star=%016VR{star} cstar=%016VR{cstar}\n"
12968 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12969 );
12970
12971 char szInstr1[256];
12972 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12973 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12974 szInstr1, sizeof(szInstr1), NULL);
12975 char szInstr2[256];
12976 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12977 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12978 szInstr2, sizeof(szInstr2), NULL);
12979
12980 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12981}
12982
12983
12984/**
12985 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12986 * dump to the assertion info.
12987 *
12988 * @param pEvtRec The record to dump.
12989 */
12990IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12991{
12992 switch (pEvtRec->enmEvent)
12993 {
12994 case IEMVERIFYEVENT_IOPORT_READ:
12995 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12996 pEvtRec->u.IOPortRead.Port,
12997 pEvtRec->u.IOPortRead.cbValue);
12998 break;
12999 case IEMVERIFYEVENT_IOPORT_WRITE:
13000 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13001 pEvtRec->u.IOPortWrite.Port,
13002 pEvtRec->u.IOPortWrite.cbValue,
13003 pEvtRec->u.IOPortWrite.u32Value);
13004 break;
13005 case IEMVERIFYEVENT_IOPORT_STR_READ:
13006 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13007 pEvtRec->u.IOPortStrRead.Port,
13008 pEvtRec->u.IOPortStrRead.cbValue,
13009 pEvtRec->u.IOPortStrRead.cTransfers);
13010 break;
13011 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13012 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13013 pEvtRec->u.IOPortStrWrite.Port,
13014 pEvtRec->u.IOPortStrWrite.cbValue,
13015 pEvtRec->u.IOPortStrWrite.cTransfers);
13016 break;
13017 case IEMVERIFYEVENT_RAM_READ:
13018 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13019 pEvtRec->u.RamRead.GCPhys,
13020 pEvtRec->u.RamRead.cb);
13021 break;
13022 case IEMVERIFYEVENT_RAM_WRITE:
13023 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13024 pEvtRec->u.RamWrite.GCPhys,
13025 pEvtRec->u.RamWrite.cb,
13026 (int)pEvtRec->u.RamWrite.cb,
13027 pEvtRec->u.RamWrite.ab);
13028 break;
13029 default:
13030 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13031 break;
13032 }
13033}
13034
13035
13036/**
13037 * Raises an assertion on the specified records, showing the given message with
13038 * dumps of both records attached.
13039 *
13040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13041 * @param pEvtRec1 The first record.
13042 * @param pEvtRec2 The second record.
13043 * @param pszMsg The message explaining why we're asserting.
13044 */
13045IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13046{
13047 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13048 iemVerifyAssertAddRecordDump(pEvtRec1);
13049 iemVerifyAssertAddRecordDump(pEvtRec2);
13050 iemVerifyAssertMsg2(pVCpu);
13051 RTAssertPanic();
13052}
13053
13054
13055/**
13056 * Raises an assertion on the specified record, showing the given message with
13057 * a record dump attached.
13058 *
13059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13060 * @param pEvtRec The record to dump.
13061 * @param pszMsg The message explaining why we're asserting.
13062 */
13063IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13064{
13065 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13066 iemVerifyAssertAddRecordDump(pEvtRec);
13067 iemVerifyAssertMsg2(pVCpu);
13068 RTAssertPanic();
13069}
13070
13071
13072/**
13073 * Verifies a write record.
13074 *
13075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13076 * @param pEvtRec The write record.
13077 * @param fRem Set if REM did the other execution. If clear,
13078 * it was HM.
13079 */
13080IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13081{
13082 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13083 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13084 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13085 if ( RT_FAILURE(rc)
13086 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13087 {
13088 /* fend off ins */
13089 if ( !pVCpu->iem.s.cIOReads
13090 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13091 || ( pEvtRec->u.RamWrite.cb != 1
13092 && pEvtRec->u.RamWrite.cb != 2
13093 && pEvtRec->u.RamWrite.cb != 4) )
13094 {
13095 /* fend off ROMs and MMIO */
13096 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13097 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13098 {
13099 /* fend off fxsave */
13100 if (pEvtRec->u.RamWrite.cb != 512)
13101 {
13102 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13103 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13104 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13105 RTAssertMsg2Add("%s: %.*Rhxs\n"
13106 "iem: %.*Rhxs\n",
13107 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13108 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13109 iemVerifyAssertAddRecordDump(pEvtRec);
13110 iemVerifyAssertMsg2(pVCpu);
13111 RTAssertPanic();
13112 }
13113 }
13114 }
13115 }
13116
13117}
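
/*
 * The checks above intentionally skip a few classes of expected mismatches instead of
 * asserting: INS-style stores of the 0xcc dummy read pattern, writes that land in the
 * legacy VGA/ROM window (the 0xa0000 area) or in the firmware range just below 4GB
 * (0xfffc0000 and up), and 512-byte FXSAVE images.
 */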
13118
13119/**
13120 * Performs the post-execution verification checks.
13121 */
13122IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13123{
13124 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13125 return rcStrictIem;
13126
13127 /*
13128 * Switch back the state.
13129 */
13130 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13131 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13132 Assert(pOrgCtx != pDebugCtx);
13133 IEM_GET_CTX(pVCpu) = pOrgCtx;
13134
13135 /*
13136 * Execute the instruction in REM.
13137 */
13138 bool fRem = false;
13139 PVM pVM = pVCpu->CTX_SUFF(pVM);
13140
13141 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13142#ifdef IEM_VERIFICATION_MODE_FULL_HM
13143 if ( HMIsEnabled(pVM)
13144 && pVCpu->iem.s.cIOReads == 0
13145 && pVCpu->iem.s.cIOWrites == 0
13146 && !pVCpu->iem.s.fProblematicMemory)
13147 {
13148 uint64_t uStartRip = pOrgCtx->rip;
13149 unsigned iLoops = 0;
13150 do
13151 {
13152 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13153 iLoops++;
13154 } while ( rc == VINF_SUCCESS
13155 || ( rc == VINF_EM_DBG_STEPPED
13156 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13157 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13158 || ( pOrgCtx->rip != pDebugCtx->rip
13159 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13160 && iLoops < 8) );
13161 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13162 rc = VINF_SUCCESS;
13163 }
13164#endif
13165 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13166 || rc == VINF_IOM_R3_IOPORT_READ
13167 || rc == VINF_IOM_R3_IOPORT_WRITE
13168 || rc == VINF_IOM_R3_MMIO_READ
13169 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13170 || rc == VINF_IOM_R3_MMIO_WRITE
13171 || rc == VINF_CPUM_R3_MSR_READ
13172 || rc == VINF_CPUM_R3_MSR_WRITE
13173 || rc == VINF_EM_RESCHEDULE
13174 )
13175 {
13176 EMRemLock(pVM);
13177 rc = REMR3EmulateInstruction(pVM, pVCpu);
13178 AssertRC(rc);
13179 EMRemUnlock(pVM);
13180 fRem = true;
13181 }
13182
13183# if 1 /* Skip unimplemented instructions for now. */
13184 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13185 {
13186 IEM_GET_CTX(pVCpu) = pOrgCtx;
13187 if (rc == VINF_EM_DBG_STEPPED)
13188 return VINF_SUCCESS;
13189 return rc;
13190 }
13191# endif
13192
13193 /*
13194 * Compare the register states.
13195 */
13196 unsigned cDiffs = 0;
13197 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13198 {
13199 //Log(("REM and IEM ends up with different registers!\n"));
13200 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13201
13202# define CHECK_FIELD(a_Field) \
13203 do \
13204 { \
13205 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13206 { \
13207 switch (sizeof(pOrgCtx->a_Field)) \
13208 { \
13209 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13210 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13211 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13212 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13213 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13214 } \
13215 cDiffs++; \
13216 } \
13217 } while (0)
13218# define CHECK_XSTATE_FIELD(a_Field) \
13219 do \
13220 { \
13221 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13222 { \
13223 switch (sizeof(pOrgXState->a_Field)) \
13224 { \
13225 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13226 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13227 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13228 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13229 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13230 } \
13231 cDiffs++; \
13232 } \
13233 } while (0)
13234
13235# define CHECK_BIT_FIELD(a_Field) \
13236 do \
13237 { \
13238 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13239 { \
13240 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13241 cDiffs++; \
13242 } \
13243 } while (0)
13244
13245# define CHECK_SEL(a_Sel) \
13246 do \
13247 { \
13248 CHECK_FIELD(a_Sel.Sel); \
13249 CHECK_FIELD(a_Sel.Attr.u); \
13250 CHECK_FIELD(a_Sel.u64Base); \
13251 CHECK_FIELD(a_Sel.u32Limit); \
13252 CHECK_FIELD(a_Sel.fFlags); \
13253 } while (0)
13254
13255 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13256 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13257
13258#if 1 /* The recompiler doesn't update these the intel way. */
13259 if (fRem)
13260 {
13261 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13262 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13263 pOrgXState->x87.CS = pDebugXState->x87.CS;
13264 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13265 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13266 pOrgXState->x87.DS = pDebugXState->x87.DS;
13267 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13268 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13269 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13270 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13271 }
13272#endif
13273 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13274 {
13275 RTAssertMsg2Weak(" the FPU state differs\n");
13276 cDiffs++;
13277 CHECK_XSTATE_FIELD(x87.FCW);
13278 CHECK_XSTATE_FIELD(x87.FSW);
13279 CHECK_XSTATE_FIELD(x87.FTW);
13280 CHECK_XSTATE_FIELD(x87.FOP);
13281 CHECK_XSTATE_FIELD(x87.FPUIP);
13282 CHECK_XSTATE_FIELD(x87.CS);
13283 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13284 CHECK_XSTATE_FIELD(x87.FPUDP);
13285 CHECK_XSTATE_FIELD(x87.DS);
13286 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13287 CHECK_XSTATE_FIELD(x87.MXCSR);
13288 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13289 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13290 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13291 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13292 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13293 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13294 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13295 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13296 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13297 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13298 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13299 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13300 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13301 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13302 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13303 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13304 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13305 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13306 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13307 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13308 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13309 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13310 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13311 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13312 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13313 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13314 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13315 }
13316 CHECK_FIELD(rip);
13317 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13318 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13319 {
13320 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13321 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13322 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13323 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13324 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13325 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13326 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13327 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13328 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13329 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13330 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13331 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13332 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13333 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13334 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13335 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13336 if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
13337 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13338 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13339 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13340 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13341 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13342 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13343 }
13344
13345 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13346 CHECK_FIELD(rax);
13347 CHECK_FIELD(rcx);
13348 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13349 CHECK_FIELD(rdx);
13350 CHECK_FIELD(rbx);
13351 CHECK_FIELD(rsp);
13352 CHECK_FIELD(rbp);
13353 CHECK_FIELD(rsi);
13354 CHECK_FIELD(rdi);
13355 CHECK_FIELD(r8);
13356 CHECK_FIELD(r9);
13357 CHECK_FIELD(r10);
13358 CHECK_FIELD(r11);
13359 CHECK_FIELD(r12);
13360 CHECK_FIELD(r13);
13361 CHECK_SEL(cs);
13362 CHECK_SEL(ss);
13363 CHECK_SEL(ds);
13364 CHECK_SEL(es);
13365 CHECK_SEL(fs);
13366 CHECK_SEL(gs);
13367 CHECK_FIELD(cr0);
13368
13369 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13370 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13371 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
13372 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13373 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13374 {
13375 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13376 { /* ignore */ }
13377 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13378 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13379 && fRem)
13380 { /* ignore */ }
13381 else
13382 CHECK_FIELD(cr2);
13383 }
13384 CHECK_FIELD(cr3);
13385 CHECK_FIELD(cr4);
13386 CHECK_FIELD(dr[0]);
13387 CHECK_FIELD(dr[1]);
13388 CHECK_FIELD(dr[2]);
13389 CHECK_FIELD(dr[3]);
13390 CHECK_FIELD(dr[6]);
13391 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13392 CHECK_FIELD(dr[7]);
13393 CHECK_FIELD(gdtr.cbGdt);
13394 CHECK_FIELD(gdtr.pGdt);
13395 CHECK_FIELD(idtr.cbIdt);
13396 CHECK_FIELD(idtr.pIdt);
13397 CHECK_SEL(ldtr);
13398 CHECK_SEL(tr);
13399 CHECK_FIELD(SysEnter.cs);
13400 CHECK_FIELD(SysEnter.eip);
13401 CHECK_FIELD(SysEnter.esp);
13402 CHECK_FIELD(msrEFER);
13403 CHECK_FIELD(msrSTAR);
13404 CHECK_FIELD(msrPAT);
13405 CHECK_FIELD(msrLSTAR);
13406 CHECK_FIELD(msrCSTAR);
13407 CHECK_FIELD(msrSFMASK);
13408 CHECK_FIELD(msrKERNELGSBASE);
13409
13410 if (cDiffs != 0)
13411 {
13412 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13413 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13414 RTAssertPanic();
13415 static bool volatile s_fEnterDebugger = true;
13416 if (s_fEnterDebugger)
13417 DBGFSTOP(pVM);
13418
13419# if 1 /* Ignore unimplemented instructions for now. */
13420 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13421 rcStrictIem = VINF_SUCCESS;
13422# endif
13423 }
13424# undef CHECK_FIELD
13425# undef CHECK_BIT_FIELD
13426 }
13427
13428 /*
13429 * If the register state compared fine, check the verification event
13430 * records.
13431 */
13432 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13433 {
13434 /*
13435 * Compare verification event records.
13436 * - I/O port accesses should be a 1:1 match.
13437 */
13438 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13439 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13440 while (pIemRec && pOtherRec)
13441 {
13442 /* Since we might miss RAM writes and reads, ignore reads and verify
13443 that any extra IEM write records match what is actually in guest memory. */
13444 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13445 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13446 && pIemRec->pNext)
13447 {
13448 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13449 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13450 pIemRec = pIemRec->pNext;
13451 }
13452
13453 /* Do the compare. */
13454 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13455 {
13456 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13457 break;
13458 }
13459 bool fEquals;
13460 switch (pIemRec->enmEvent)
13461 {
13462 case IEMVERIFYEVENT_IOPORT_READ:
13463 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13464 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13465 break;
13466 case IEMVERIFYEVENT_IOPORT_WRITE:
13467 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13468 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13469 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13470 break;
13471 case IEMVERIFYEVENT_IOPORT_STR_READ:
13472 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13473 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13474 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13475 break;
13476 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13477 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13478 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13479 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13480 break;
13481 case IEMVERIFYEVENT_RAM_READ:
13482 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13483 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13484 break;
13485 case IEMVERIFYEVENT_RAM_WRITE:
13486 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13487 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13488 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13489 break;
13490 default:
13491 fEquals = false;
13492 break;
13493 }
13494 if (!fEquals)
13495 {
13496 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13497 break;
13498 }
13499
13500 /* advance */
13501 pIemRec = pIemRec->pNext;
13502 pOtherRec = pOtherRec->pNext;
13503 }
13504
13505 /* Ignore extra writes and reads. */
13506 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13507 {
13508 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13509 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13510 pIemRec = pIemRec->pNext;
13511 }
13512 if (pIemRec != NULL)
13513 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13514 else if (pOtherRec != NULL)
13515 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13516 }
13517 IEM_GET_CTX(pVCpu) = pOrgCtx;
13518
13519 return rcStrictIem;
13520}
13521
13522#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13523
13524/* stubs */
13525IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13526{
13527 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13528 return VERR_INTERNAL_ERROR;
13529}
13530
13531IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13532{
13533 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13534 return VERR_INTERNAL_ERROR;
13535}
13536
13537#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13538
13539
13540#ifdef LOG_ENABLED
13541/**
13542 * Logs the current instruction.
13543 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13544 * @param pCtx The current CPU context.
13545 * @param fSameCtx Set if we have the same context information as the VMM,
13546 * clear if we may have already executed an instruction in
13547 * our debug context. When clear, we assume IEMCPU holds
13548 * valid CPU mode info.
13549 */
13550IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13551{
13552# ifdef IN_RING3
13553 if (LogIs2Enabled())
13554 {
13555 char szInstr[256];
13556 uint32_t cbInstr = 0;
13557 if (fSameCtx)
13558 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13559 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13560 szInstr, sizeof(szInstr), &cbInstr);
13561 else
13562 {
13563 uint32_t fFlags = 0;
13564 switch (pVCpu->iem.s.enmCpuMode)
13565 {
13566 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13567 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13568 case IEMMODE_16BIT:
13569 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13570 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13571 else
13572 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13573 break;
13574 }
13575 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13576 szInstr, sizeof(szInstr), &cbInstr);
13577 }
13578
13579 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13580 Log2(("****\n"
13581 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13582 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13583 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13584 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13585 " %s\n"
13586 ,
13587 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13588 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13589 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13590 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13591 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13592 szInstr));
13593
13594 if (LogIs3Enabled())
13595 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13596 }
13597 else
13598# endif
13599 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13600 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13601 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13602}
13603#endif
13604
13605
13606/**
13607 * Makes status code adjustments (pass up from I/O and access handler)
13608 * as well as maintaining statistics.
13609 *
13610 * @returns Strict VBox status code to pass up.
13611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13612 * @param rcStrict The status from executing an instruction.
13613 */
13614DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13615{
13616 if (rcStrict != VINF_SUCCESS)
13617 {
13618 if (RT_SUCCESS(rcStrict))
13619 {
13620 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13621 || rcStrict == VINF_IOM_R3_IOPORT_READ
13622 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13623 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13624 || rcStrict == VINF_IOM_R3_MMIO_READ
13625 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13626 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13627 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13628 || rcStrict == VINF_CPUM_R3_MSR_READ
13629 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13630 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13631 || rcStrict == VINF_EM_RAW_TO_R3
13632 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13633 /* raw-mode / virt handlers only: */
13634 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13635 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13636 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13637 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13638 || rcStrict == VINF_SELM_SYNC_GDT
13639 || rcStrict == VINF_CSAM_PENDING_ACTION
13640 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13641 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13642/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13643 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13644 if (rcPassUp == VINF_SUCCESS)
13645 pVCpu->iem.s.cRetInfStatuses++;
13646 else if ( rcPassUp < VINF_EM_FIRST
13647 || rcPassUp > VINF_EM_LAST
13648 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13649 {
13650 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13651 pVCpu->iem.s.cRetPassUpStatus++;
13652 rcStrict = rcPassUp;
13653 }
13654 else
13655 {
13656 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13657 pVCpu->iem.s.cRetInfStatuses++;
13658 }
13659 }
13660 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13661 pVCpu->iem.s.cRetAspectNotImplemented++;
13662 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13663 pVCpu->iem.s.cRetInstrNotImplemented++;
13664#ifdef IEM_VERIFICATION_MODE_FULL
13665 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13666 rcStrict = VINF_SUCCESS;
13667#endif
13668 else
13669 pVCpu->iem.s.cRetErrStatuses++;
13670 }
13671 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13672 {
13673 pVCpu->iem.s.cRetPassUpStatus++;
13674 rcStrict = pVCpu->iem.s.rcPassUp;
13675 }
13676
13677 return rcStrict;
13678}
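
/*
 * Illustrative sketch (not part of the build) of the pass-up priority rule applied
 * above, assuming both status codes are informational: the pending pass-up code
 * replaces rcStrict whenever it falls outside the VINF_EM_FIRST..VINF_EM_LAST
 * scheduling range or is numerically smaller than rcStrict; otherwise rcStrict
 * is kept.  The helper name below is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemPassUpPriorityExample(int32_t rcPassUp, VBOXSTRICTRC rcStrict)
{
    if (   rcPassUp < VINF_EM_FIRST
        || rcPassUp > VINF_EM_LAST
        || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
        return rcPassUp;    /* the pass-up status takes precedence */
    return rcStrict;        /* keep the current scheduling status */
}
#endif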
13679
13680
13681/**
13682 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13683 * IEMExecOneWithPrefetchedByPC.
13684 *
13685 * Similar code is found in IEMExecLots.
13686 *
13687 * @return Strict VBox status code.
13688 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13690 * @param fExecuteInhibit If set, execute the instruction following CLI,
13691 * POP SS and MOV SS,GR.
13692 */
13693DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13694{
13695#ifdef IEM_WITH_SETJMP
13696 VBOXSTRICTRC rcStrict;
13697 jmp_buf JmpBuf;
13698 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13699 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13700 if ((rcStrict = setjmp(JmpBuf)) == 0)
13701 {
13702 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13703 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13704 }
13705 else
13706 pVCpu->iem.s.cLongJumps++;
13707 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13708#else
13709 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13710 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13711#endif
13712 if (rcStrict == VINF_SUCCESS)
13713 pVCpu->iem.s.cInstructions++;
13714 if (pVCpu->iem.s.cActiveMappings > 0)
13715 {
13716 Assert(rcStrict != VINF_SUCCESS);
13717 iemMemRollback(pVCpu);
13718 }
13719//#ifdef DEBUG
13720// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13721//#endif
13722
13723 /* Execute the next instruction as well if a cli, pop ss or
13724 mov ss, Gr has just completed successfully. */
13725 if ( fExecuteInhibit
13726 && rcStrict == VINF_SUCCESS
13727 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13728 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13729 {
13730 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13731 if (rcStrict == VINF_SUCCESS)
13732 {
13733#ifdef LOG_ENABLED
13734 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13735#endif
13736#ifdef IEM_WITH_SETJMP
13737 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13738 if ((rcStrict = setjmp(JmpBuf)) == 0)
13739 {
13740 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13741 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13742 }
13743 else
13744 pVCpu->iem.s.cLongJumps++;
13745 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13746#else
13747 IEM_OPCODE_GET_NEXT_U8(&b);
13748 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13749#endif
13750 if (rcStrict == VINF_SUCCESS)
13751 pVCpu->iem.s.cInstructions++;
13752 if (pVCpu->iem.s.cActiveMappings > 0)
13753 {
13754 Assert(rcStrict != VINF_SUCCESS);
13755 iemMemRollback(pVCpu);
13756 }
13757 }
13758 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13759 }
13760
13761 /*
13762 * Return value fiddling, statistics and sanity assertions.
13763 */
13764 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13765
13766 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13767 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13768#if defined(IEM_VERIFICATION_MODE_FULL)
13769 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13770 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13771 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13772 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13773#endif
13774 return rcStrict;
13775}
13776
13777
13778#ifdef IN_RC
13779/**
13780 * Re-enters raw-mode or ensures we return to ring-3.
13781 *
13782 * @returns rcStrict, maybe modified.
13783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13784 * @param pCtx The current CPU context.
13785 * @param rcStrict The status code returned by the interpreter.
13786 */
13787DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13788{
13789 if ( !pVCpu->iem.s.fInPatchCode
13790 && ( rcStrict == VINF_SUCCESS
13791 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13792 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13793 {
13794 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13795 CPUMRawEnter(pVCpu);
13796 else
13797 {
13798 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13799 rcStrict = VINF_EM_RESCHEDULE;
13800 }
13801 }
13802 return rcStrict;
13803}
13804#endif
13805
13806
13807/**
13808 * Execute one instruction.
13809 *
13810 * @return Strict VBox status code.
13811 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13812 */
13813VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13814{
13815#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13816 if (++pVCpu->iem.s.cVerifyDepth == 1)
13817 iemExecVerificationModeSetup(pVCpu);
13818#endif
13819#ifdef LOG_ENABLED
13820 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13821 iemLogCurInstr(pVCpu, pCtx, true);
13822#endif
13823
13824 /*
13825 * Do the decoding and emulation.
13826 */
13827 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13828 if (rcStrict == VINF_SUCCESS)
13829 rcStrict = iemExecOneInner(pVCpu, true);
13830
13831#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13832 /*
13833 * Assert some sanity.
13834 */
13835 if (pVCpu->iem.s.cVerifyDepth == 1)
13836 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13837 pVCpu->iem.s.cVerifyDepth--;
13838#endif
13839#ifdef IN_RC
13840 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13841#endif
13842 if (rcStrict != VINF_SUCCESS)
13843 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13844 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13845 return rcStrict;
13846}
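
/*
 * Hedged usage sketch (illustration only, not part of the build): a caller on the
 * EMT single-stepping through IEM.  Any status other than VINF_SUCCESS must be
 * propagated so EM can reschedule, go to ring-3, or handle pending I/O.
 */
#if 0
VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
if (rcStrict != VINF_SUCCESS)
    Log(("Instruction not completed locally: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
#endif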
13847
13848
13849VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13850{
13851 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13852 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13853
13854 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13855 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13856 if (rcStrict == VINF_SUCCESS)
13857 {
13858 rcStrict = iemExecOneInner(pVCpu, true);
13859 if (pcbWritten)
13860 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13861 }
13862
13863#ifdef IN_RC
13864 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13865#endif
13866 return rcStrict;
13867}
13868
13869
13870VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13871 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13872{
13873 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13874 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13875
13876 VBOXSTRICTRC rcStrict;
13877 if ( cbOpcodeBytes
13878 && pCtx->rip == OpcodeBytesPC)
13879 {
13880 iemInitDecoder(pVCpu, false);
13881#ifdef IEM_WITH_CODE_TLB
13882 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13883 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13884 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13885 pVCpu->iem.s.offCurInstrStart = 0;
13886 pVCpu->iem.s.offInstrNextByte = 0;
13887#else
13888 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13889 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13890#endif
13891 rcStrict = VINF_SUCCESS;
13892 }
13893 else
13894 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13895 if (rcStrict == VINF_SUCCESS)
13896 {
13897 rcStrict = iemExecOneInner(pVCpu, true);
13898 }
13899
13900#ifdef IN_RC
13901 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13902#endif
13903 return rcStrict;
13904}
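
/*
 * Hedged usage sketch (illustration only, not part of the build): feeding IEM
 * opcode bytes the caller already fetched for the current RIP so no second
 * prefetch is needed.  pCtx, abOpcodes and cbOpcodes are hypothetical caller
 * variables.
 */
#if 0
VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx),
                                                     pCtx->rip, abOpcodes, cbOpcodes);
#endif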
13905
13906
13907VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13908{
13909 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13910 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13911
13912 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13913 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13914 if (rcStrict == VINF_SUCCESS)
13915 {
13916 rcStrict = iemExecOneInner(pVCpu, false);
13917 if (pcbWritten)
13918 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13919 }
13920
13921#ifdef IN_RC
13922 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13923#endif
13924 return rcStrict;
13925}
13926
13927
13928VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13929 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13930{
13931 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13932 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13933
13934 VBOXSTRICTRC rcStrict;
13935 if ( cbOpcodeBytes
13936 && pCtx->rip == OpcodeBytesPC)
13937 {
13938 iemInitDecoder(pVCpu, true);
13939#ifdef IEM_WITH_CODE_TLB
13940 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13941 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13942 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13943 pVCpu->iem.s.offCurInstrStart = 0;
13944 pVCpu->iem.s.offInstrNextByte = 0;
13945#else
13946 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13947 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13948#endif
13949 rcStrict = VINF_SUCCESS;
13950 }
13951 else
13952 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13953 if (rcStrict == VINF_SUCCESS)
13954 rcStrict = iemExecOneInner(pVCpu, false);
13955
13956#ifdef IN_RC
13957 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13958#endif
13959 return rcStrict;
13960}
13961
13962
13963/**
13964 * For debugging DISGetParamSize; may come in handy.
13965 *
13966 * @returns Strict VBox status code.
13967 * @param pVCpu The cross context virtual CPU structure of the
13968 * calling EMT.
13969 * @param pCtxCore The context core structure.
13970 * @param OpcodeBytesPC The PC of the opcode bytes.
13971 * @param pvOpcodeBytes Prefetched opcode bytes.
13972 * @param cbOpcodeBytes Number of prefetched bytes.
13973 * @param pcbWritten Where to return the number of bytes written.
13974 * Optional.
13975 */
13976VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13977 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13978 uint32_t *pcbWritten)
13979{
13980 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13981 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13982
13983 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13984 VBOXSTRICTRC rcStrict;
13985 if ( cbOpcodeBytes
13986 && pCtx->rip == OpcodeBytesPC)
13987 {
13988 iemInitDecoder(pVCpu, true);
13989#ifdef IEM_WITH_CODE_TLB
13990 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13991 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13992 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13993 pVCpu->iem.s.offCurInstrStart = 0;
13994 pVCpu->iem.s.offInstrNextByte = 0;
13995#else
13996 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13997 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13998#endif
13999 rcStrict = VINF_SUCCESS;
14000 }
14001 else
14002 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14003 if (rcStrict == VINF_SUCCESS)
14004 {
14005 rcStrict = iemExecOneInner(pVCpu, false);
14006 if (pcbWritten)
14007 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14008 }
14009
14010#ifdef IN_RC
14011 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14012#endif
14013 return rcStrict;
14014}
14015
14016
14017VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14018{
14019 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14020
14021#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14022 /*
14023 * See if there is an interrupt pending in TRPM, inject it if we can.
14024 */
14025 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14026# ifdef IEM_VERIFICATION_MODE_FULL
14027 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14028# endif
14029 if ( pCtx->eflags.Bits.u1IF
14030 && TRPMHasTrap(pVCpu)
14031 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14032 {
14033 uint8_t u8TrapNo;
14034 TRPMEVENT enmType;
14035 RTGCUINT uErrCode;
14036 RTGCPTR uCr2;
14037 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14038 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14039 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14040 TRPMResetTrap(pVCpu);
14041 }
14042
14043 /*
14044 * Log the state.
14045 */
14046# ifdef LOG_ENABLED
14047 iemLogCurInstr(pVCpu, pCtx, true);
14048# endif
14049
14050 /*
14051 * Do the decoding and emulation.
14052 */
14053 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14054 if (rcStrict == VINF_SUCCESS)
14055 rcStrict = iemExecOneInner(pVCpu, true);
14056
14057 /*
14058 * Assert some sanity.
14059 */
14060 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14061
14062 /*
14063 * Log and return.
14064 */
14065 if (rcStrict != VINF_SUCCESS)
14066 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14067 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14068 if (pcInstructions)
14069 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14070 return rcStrict;
14071
14072#else /* Not verification mode */
14073
14074 /*
14075 * See if there is an interrupt pending in TRPM, inject it if we can.
14076 */
14077 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14078# ifdef IEM_VERIFICATION_MODE_FULL
14079 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14080# endif
14081 if ( pCtx->eflags.Bits.u1IF
14082 && TRPMHasTrap(pVCpu)
14083 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14084 {
14085 uint8_t u8TrapNo;
14086 TRPMEVENT enmType;
14087 RTGCUINT uErrCode;
14088 RTGCPTR uCr2;
14089 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14090 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14091 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14092 TRPMResetTrap(pVCpu);
14093 }
14094
14095 /*
14096 * Initial decoder init w/ prefetch, then setup setjmp.
14097 */
14098 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14099 if (rcStrict == VINF_SUCCESS)
14100 {
14101# ifdef IEM_WITH_SETJMP
14102 jmp_buf JmpBuf;
14103 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14104 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14105 pVCpu->iem.s.cActiveMappings = 0;
14106 if ((rcStrict = setjmp(JmpBuf)) == 0)
14107# endif
14108 {
14109 /*
14110 * The run loop. We limit ourselves to 4096 instructions right now.
14111 */
14112 PVM pVM = pVCpu->CTX_SUFF(pVM);
14113 uint32_t cInstr = 4096;
14114 for (;;)
14115 {
14116 /*
14117 * Log the state.
14118 */
14119# ifdef LOG_ENABLED
14120 iemLogCurInstr(pVCpu, pCtx, true);
14121# endif
14122
14123 /*
14124 * Do the decoding and emulation.
14125 */
14126 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14127 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14128 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14129 {
14130 Assert(pVCpu->iem.s.cActiveMappings == 0);
14131 pVCpu->iem.s.cInstructions++;
14132 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14133 {
14134 uint32_t fCpu = pVCpu->fLocalForcedActions
14135 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14136 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14137 | VMCPU_FF_TLB_FLUSH
14138# ifdef VBOX_WITH_RAW_MODE
14139 | VMCPU_FF_TRPM_SYNC_IDT
14140 | VMCPU_FF_SELM_SYNC_TSS
14141 | VMCPU_FF_SELM_SYNC_GDT
14142 | VMCPU_FF_SELM_SYNC_LDT
14143# endif
14144 | VMCPU_FF_INHIBIT_INTERRUPTS
14145 | VMCPU_FF_BLOCK_NMIS ));
14146
14147 if (RT_LIKELY( ( !fCpu
14148 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14149 && !pCtx->rflags.Bits.u1IF) )
14150 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14151 {
14152 if (cInstr-- > 0)
14153 {
14154 Assert(pVCpu->iem.s.cActiveMappings == 0);
14155 iemReInitDecoder(pVCpu);
14156 continue;
14157 }
14158 }
14159 }
14160 Assert(pVCpu->iem.s.cActiveMappings == 0);
14161 }
14162 else if (pVCpu->iem.s.cActiveMappings > 0)
14163 iemMemRollback(pVCpu);
14164 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14165 break;
14166 }
14167 }
14168# ifdef IEM_WITH_SETJMP
14169 else
14170 {
14171 if (pVCpu->iem.s.cActiveMappings > 0)
14172 iemMemRollback(pVCpu);
14173 pVCpu->iem.s.cLongJumps++;
14174 }
14175 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14176# endif
14177
14178 /*
14179 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14180 */
14181 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14182 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14183# if defined(IEM_VERIFICATION_MODE_FULL)
14184 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14185 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14186 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14187 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14188# endif
14189 }
14190
14191 /*
14192 * Maybe re-enter raw-mode and log.
14193 */
14194# ifdef IN_RC
14195 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14196# endif
14197 if (rcStrict != VINF_SUCCESS)
14198 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14199 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14200 if (pcInstructions)
14201 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14202 return rcStrict;
14203#endif /* Not verification mode */
14204}
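
/*
 * Hedged usage sketch (illustration only, not part of the build): executing a
 * burst of instructions and retrieving how many actually completed.
 */
#if 0
uint32_t cInstructions = 0;
VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
LogFlow(("Burst done: %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
#endif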
14205
14206
14207
14208/**
14209 * Injects a trap, fault, abort, software interrupt or external interrupt.
14210 *
14211 * The parameter list matches TRPMQueryTrapAll pretty closely.
14212 *
14213 * @returns Strict VBox status code.
14214 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14215 * @param u8TrapNo The trap number.
14216 * @param enmType What type is it (trap/fault/abort), software
14217 * interrupt or hardware interrupt.
14218 * @param uErrCode The error code if applicable.
14219 * @param uCr2 The CR2 value if applicable.
14220 * @param cbInstr The instruction length (only relevant for
14221 * software interrupts).
14222 */
14223VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14224 uint8_t cbInstr)
14225{
14226 iemInitDecoder(pVCpu, false);
14227#ifdef DBGFTRACE_ENABLED
14228 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14229 u8TrapNo, enmType, uErrCode, uCr2);
14230#endif
14231
14232 uint32_t fFlags;
14233 switch (enmType)
14234 {
14235 case TRPM_HARDWARE_INT:
14236 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14237 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14238 uErrCode = uCr2 = 0;
14239 break;
14240
14241 case TRPM_SOFTWARE_INT:
14242 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14243 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14244 uErrCode = uCr2 = 0;
14245 break;
14246
14247 case TRPM_TRAP:
14248 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14249 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14250 if (u8TrapNo == X86_XCPT_PF)
14251 fFlags |= IEM_XCPT_FLAGS_CR2;
14252 switch (u8TrapNo)
14253 {
14254 case X86_XCPT_DF:
14255 case X86_XCPT_TS:
14256 case X86_XCPT_NP:
14257 case X86_XCPT_SS:
14258 case X86_XCPT_PF:
14259 case X86_XCPT_AC:
14260 fFlags |= IEM_XCPT_FLAGS_ERR;
14261 break;
14262
14263 case X86_XCPT_NMI:
14264 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14265 break;
14266 }
14267 break;
14268
14269 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14270 }
14271
14272 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14273}
14274
14275
14276/**
14277 * Injects the active TRPM event.
14278 *
14279 * @returns Strict VBox status code.
14280 * @param pVCpu The cross context virtual CPU structure.
14281 */
14282VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14283{
14284#ifndef IEM_IMPLEMENTS_TASKSWITCH
14285 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14286#else
14287 uint8_t u8TrapNo;
14288 TRPMEVENT enmType;
14289 RTGCUINT uErrCode;
14290 RTGCUINTPTR uCr2;
14291 uint8_t cbInstr;
14292 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14293 if (RT_FAILURE(rc))
14294 return rc;
14295
14296 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14297
14298 /** @todo Are there any other codes that imply the event was successfully
14299 * delivered to the guest? See @bugref{6607}. */
14300 if ( rcStrict == VINF_SUCCESS
14301 || rcStrict == VINF_IEM_RAISED_XCPT)
14302 {
14303 TRPMResetTrap(pVCpu);
14304 }
14305 return rcStrict;
14306#endif
14307}
14308
14309
14310VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14311{
14312 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14313 return VERR_NOT_IMPLEMENTED;
14314}
14315
14316
14317VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14318{
14319 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14320 return VERR_NOT_IMPLEMENTED;
14321}
14322
14323
14324#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14325/**
14326 * Executes an IRET instruction with default operand size.
14327 *
14328 * This is for PATM.
14329 *
14330 * @returns VBox status code.
14331 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14332 * @param pCtxCore The register frame.
14333 */
14334VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14335{
14336 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14337
14338 iemCtxCoreToCtx(pCtx, pCtxCore);
14339 iemInitDecoder(pVCpu);
14340 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14341 if (rcStrict == VINF_SUCCESS)
14342 iemCtxToCtxCore(pCtxCore, pCtx);
14343 else
14344 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14345 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14346 return rcStrict;
14347}
14348#endif
14349
14350
14351/**
14352 * Macro used by the IEMExec* methods to check the given instruction length.
14353 *
14354 * Will return on failure!
14355 *
14356 * @param a_cbInstr The given instruction length.
14357 * @param a_cbMin The minimum length.
14358 */
14359#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14360 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14361 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
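/* Worked example of the single-comparison range check above, assuming a_cbMin is 2:
   (unsigned)cbInstr - 2U <= 15U - 2U accepts only cbInstr in [2..15], since any
   cbInstr below 2 wraps around to a huge unsigned value and fails the test. */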
14362
14363
14364/**
14365 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14366 *
14367 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14368 *
14369 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14371 * @param rcStrict The status code to fiddle.
14372 */
14373DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14374{
14375 iemUninitExec(pVCpu);
14376#ifdef IN_RC
14377 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14378 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14379#else
14380 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14381#endif
14382}
14383
14384
14385/**
14386 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14387 *
14388 * This API ASSUMES that the caller has already verified that the guest code is
14389 * allowed to access the I/O port. (The I/O port is in the DX register in the
14390 * guest state.)
14391 *
14392 * @returns Strict VBox status code.
14393 * @param pVCpu The cross context virtual CPU structure.
14394 * @param cbValue The size of the I/O port access (1, 2, or 4).
14395 * @param enmAddrMode The addressing mode.
14396 * @param fRepPrefix Indicates whether a repeat prefix is used
14397 * (doesn't matter which for this instruction).
14398 * @param cbInstr The instruction length in bytes.
14399 * @param iEffSeg The effective segment address.
14400 * @param fIoChecked Whether the access to the I/O port has been
14401 * checked or not. It's typically checked in the
14402 * HM scenario.
14403 */
14404VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14405 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14406{
14407 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14408 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14409
14410 /*
14411 * State init.
14412 */
14413 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14414
14415 /*
14416 * Switch orgy for getting to the right handler.
14417 */
14418 VBOXSTRICTRC rcStrict;
14419 if (fRepPrefix)
14420 {
14421 switch (enmAddrMode)
14422 {
14423 case IEMMODE_16BIT:
14424 switch (cbValue)
14425 {
14426 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14427 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14428 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14429 default:
14430 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14431 }
14432 break;
14433
14434 case IEMMODE_32BIT:
14435 switch (cbValue)
14436 {
14437 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14438 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14439 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14440 default:
14441 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14442 }
14443 break;
14444
14445 case IEMMODE_64BIT:
14446 switch (cbValue)
14447 {
14448 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14449 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14450 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14451 default:
14452 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14453 }
14454 break;
14455
14456 default:
14457 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14458 }
14459 }
14460 else
14461 {
14462 switch (enmAddrMode)
14463 {
14464 case IEMMODE_16BIT:
14465 switch (cbValue)
14466 {
14467 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14468 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14469 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14470 default:
14471 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14472 }
14473 break;
14474
14475 case IEMMODE_32BIT:
14476 switch (cbValue)
14477 {
14478 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14479 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14480 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14481 default:
14482 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14483 }
14484 break;
14485
14486 case IEMMODE_64BIT:
14487 switch (cbValue)
14488 {
14489 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14490 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14491 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14492 default:
14493 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14494 }
14495 break;
14496
14497 default:
14498 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14499 }
14500 }
14501
14502 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14503}
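
/*
 * Hedged usage sketch (illustration only, not part of the build): how an HM exit
 * handler might forward a "rep outsb" with 32-bit addressing to the API above.
 * cbExitInstr stands in for the instruction length reported by the exit record.
 */
#if 0
VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                                             true /*fRepPrefix*/, cbExitInstr,
                                             X86_SREG_DS, true /*fIoChecked*/);
#endif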
14504
14505
14506/**
14507 * Interface for HM and EM for executing string I/O IN (read) instructions.
14508 *
14509 * This API ASSUMES that the caller has already verified that the guest code is
14510 * allowed to access the I/O port. (The I/O port is in the DX register in the
14511 * guest state.)
14512 *
14513 * @returns Strict VBox status code.
14514 * @param pVCpu The cross context virtual CPU structure.
14515 * @param cbValue The size of the I/O port access (1, 2, or 4).
14516 * @param enmAddrMode The addressing mode.
14517 * @param fRepPrefix Indicates whether a repeat prefix is used
14518 * (doesn't matter which for this instruction).
14519 * @param cbInstr The instruction length in bytes.
14520 * @param fIoChecked Whether the access to the I/O port has been
14521 * checked or not. It's typically checked in the
14522 * HM scenario.
14523 */
14524VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14525 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14526{
14527 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14528
14529 /*
14530 * State init.
14531 */
14532 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14533
14534 /*
14535 * Switch orgy for getting to the right handler.
14536 */
14537 VBOXSTRICTRC rcStrict;
14538 if (fRepPrefix)
14539 {
14540 switch (enmAddrMode)
14541 {
14542 case IEMMODE_16BIT:
14543 switch (cbValue)
14544 {
14545 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14546 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14547 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14548 default:
14549 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14550 }
14551 break;
14552
14553 case IEMMODE_32BIT:
14554 switch (cbValue)
14555 {
14556 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14557 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14558 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14559 default:
14560 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14561 }
14562 break;
14563
14564 case IEMMODE_64BIT:
14565 switch (cbValue)
14566 {
14567 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14568 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14569 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14570 default:
14571 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14572 }
14573 break;
14574
14575 default:
14576 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14577 }
14578 }
14579 else
14580 {
14581 switch (enmAddrMode)
14582 {
14583 case IEMMODE_16BIT:
14584 switch (cbValue)
14585 {
14586 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14587 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14588 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14589 default:
14590 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14591 }
14592 break;
14593
14594 case IEMMODE_32BIT:
14595 switch (cbValue)
14596 {
14597 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14598 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14599 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14600 default:
14601 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14602 }
14603 break;
14604
14605 case IEMMODE_64BIT:
14606 switch (cbValue)
14607 {
14608 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14609 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14610 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14611 default:
14612 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14613 }
14614 break;
14615
14616 default:
14617 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14618 }
14619 }
14620
14621 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14622}
14623
14624
14625/**
14626 * Interface for raw-mode to execute an OUT (write) instruction.
14627 *
14628 * @returns Strict VBox status code.
14629 * @param pVCpu The cross context virtual CPU structure.
14630 * @param cbInstr The instruction length in bytes.
14631 * @param u16Port The port to write to.
14632 * @param cbReg The register size.
14633 *
14634 * @remarks In ring-0 not all of the state needs to be synced in.
14635 */
14636VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14637{
14638 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14639 Assert(cbReg <= 4 && cbReg != 3);
14640
14641 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14642 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14643 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14644}
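
/*
 * Hedged usage sketch (illustration only, not part of the build): emulating the
 * two-byte "out 80h, al" (immediate port 0x80, byte-sized register).
 */
#if 0
VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80 /*u16Port*/, 1 /*cbReg*/);
#endif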
14645
14646
14647/**
14648 * Interface for raw-mode to execute an IN (read) instruction.
14649 *
14650 * @returns Strict VBox status code.
14651 * @param pVCpu The cross context virtual CPU structure.
14652 * @param cbInstr The instruction length in bytes.
14653 * @param u16Port The port to read.
14654 * @param cbReg The register size.
14655 */
14656VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14657{
14658 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14659 Assert(cbReg <= 4 && cbReg != 3);
14660
14661 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14662 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14663 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14664}
14665
14666
14667/**
14668 * Interface for HM and EM to write to a CRx register.
14669 *
14670 * @returns Strict VBox status code.
14671 * @param pVCpu The cross context virtual CPU structure.
14672 * @param cbInstr The instruction length in bytes.
14673 * @param iCrReg The control register number (destination).
14674 * @param iGReg The general purpose register number (source).
14675 *
14676 * @remarks In ring-0 not all of the state needs to be synced in.
14677 */
14678VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14679{
14680 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14681 Assert(iCrReg < 16);
14682 Assert(iGReg < 16);
14683
14684 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14685 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14686 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14687}
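
/*
 * Hedged usage sketch (illustration only, not part of the build): emulating what
 * is assumed to be a three-byte "mov cr3, rax" after a CR-access exit
 * (iCrReg=3, source GPR index 0 = rax).
 */
#if 0
VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg: rax*/);
#endif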
14688
14689
14690/**
14691 * Interface for HM and EM to read from a CRx register.
14692 *
14693 * @returns Strict VBox status code.
14694 * @param pVCpu The cross context virtual CPU structure.
14695 * @param cbInstr The instruction length in bytes.
14696 * @param iGReg The general purpose register number (destination).
14697 * @param iCrReg The control register number (source).
14698 *
14699 * @remarks In ring-0 not all of the state needs to be synced in.
14700 */
14701VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14702{
14703 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14704 Assert(iCrReg < 16);
14705 Assert(iGReg < 16);
14706
14707 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14708 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14709 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14710}
14711
14712
14713/**
14714 * Interface for HM and EM to clear the CR0[TS] bit.
14715 *
14716 * @returns Strict VBox status code.
14717 * @param pVCpu The cross context virtual CPU structure.
14718 * @param cbInstr The instruction length in bytes.
14719 *
14720 * @remarks In ring-0 not all of the state needs to be synced in.
14721 */
14722VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14723{
14724 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14725
14726 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14727 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14728 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14729}
14730
14731
14732/**
14733 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14734 *
14735 * @returns Strict VBox status code.
14736 * @param pVCpu The cross context virtual CPU structure.
14737 * @param cbInstr The instruction length in bytes.
14738 * @param uValue The value to load into CR0.
14739 *
14740 * @remarks In ring-0 not all of the state needs to be synced in.
14741 */
14742VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14743{
14744 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14745
14746 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14747 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14748 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14749}
14750
14751
14752/**
14753 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14754 *
14755 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14756 *
14757 * @returns Strict VBox status code.
14758 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14759 * @param cbInstr The instruction length in bytes.
14760 * @remarks In ring-0 not all of the state needs to be synced in.
14761 * @thread EMT(pVCpu)
14762 */
14763VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14764{
14765 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14766
14767 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14768 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14769 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14770}
14771
14772#ifdef IN_RING3
14773
14774/**
14775 * Handles the unlikely and probably fatal merge cases.
14776 *
14777 * @returns Merged status code.
14778 * @param rcStrict Current EM status code.
14779 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14780 * with @a rcStrict.
14781 * @param iMemMap The memory mapping index. For error reporting only.
14782 * @param pVCpu The cross context virtual CPU structure of the calling
14783 * thread, for error reporting only.
14784 */
14785DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14786 unsigned iMemMap, PVMCPU pVCpu)
14787{
14788 if (RT_FAILURE_NP(rcStrict))
14789 return rcStrict;
14790
14791 if (RT_FAILURE_NP(rcStrictCommit))
14792 return rcStrictCommit;
14793
14794 if (rcStrict == rcStrictCommit)
14795 return rcStrictCommit;
14796
14797 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14798 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14799 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14800 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14801 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14802 return VERR_IOM_FF_STATUS_IPE;
14803}
14804
14805
14806/**
14807 * Helper for IOMR3ProcessForceFlag.
14808 *
14809 * @returns Merged status code.
14810 * @param rcStrict Current EM status code.
14811 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14812 * with @a rcStrict.
14813 * @param iMemMap The memory mapping index. For error reporting only.
14814 * @param pVCpu The cross context virtual CPU structure of the calling
14815 * thread, for error reporting only.
14816 */
14817DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14818{
14819 /* Simple. */
14820 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14821 return rcStrictCommit;
14822
14823 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14824 return rcStrict;
14825
14826 /* EM scheduling status codes. */
14827 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14828 && rcStrict <= VINF_EM_LAST))
14829 {
14830 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14831 && rcStrictCommit <= VINF_EM_LAST))
14832 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14833 }
14834
14835 /* Unlikely */
14836 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14837}
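
/*
 * Illustration of the merge rules above (not normative): a VINF_SUCCESS or
 * VINF_EM_RAW_TO_R3 current status yields to the commit status, and a
 * VINF_SUCCESS commit status yields to the current one; two EM scheduling codes
 * merge to the numerically smaller one; every other combination is handed to the
 * slow path, which only tolerates errors or identical codes.
 */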
14838
14839
14840/**
14841 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14842 *
14843 * @returns Merge between @a rcStrict and what the commit operation returned.
14844 * @param pVM The cross context VM structure.
14845 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14846 * @param rcStrict The status code returned by ring-0 or raw-mode.
14847 */
14848VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14849{
14850 /*
14851 * Reset the pending commit.
14852 */
14853 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14854 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14855 ("%#x %#x %#x\n",
14856 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14857 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14858
14859 /*
14860 * Commit the pending bounce buffers (usually just one).
14861 */
14862 unsigned cBufs = 0;
14863 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14864 while (iMemMap-- > 0)
14865 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14866 {
14867 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14868 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14869 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14870
14871 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14872 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14873 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14874
14875 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14876 {
14877 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14879 pbBuf,
14880 cbFirst,
14881 PGMACCESSORIGIN_IEM);
14882 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14883 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14884 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14885 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14886 }
14887
14888 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14889 {
14890 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14891 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14892 pbBuf + cbFirst,
14893 cbSecond,
14894 PGMACCESSORIGIN_IEM);
14895 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14896 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14897 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14898 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14899 }
14900 cBufs++;
14901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14902 }
14903
14904 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14905 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14906 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14907 pVCpu->iem.s.cActiveMappings = 0;
14908 return rcStrict;
14909}
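
/*
 * Hedged usage sketch (illustration only, not part of the build): how ring-3
 * force-flag processing might hand a pending IEM write commit to the helper above.
 */
#if 0
if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
    rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif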
14910
14911#endif /* IN_RING3 */
14912