VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@65598

Last change on this file since 65598 was 65598, checked in by vboxsync, 8 years ago

IEM: (F)WAIT uses a different #NM test.

1/* $Id: IEMAll.cpp 65598 2017-02-03 11:56:27Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#ifdef IEM_VERIFICATION_MODE_FULL
115# include <VBox/vmm/rem.h>
116# include <VBox/vmm/mm.h>
117#endif
118#include <VBox/vmm/vm.h>
119#include <VBox/log.h>
120#include <VBox/err.h>
121#include <VBox/param.h>
122#include <VBox/dis.h>
123#include <VBox/disopcode.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215
216/*********************************************************************************************************************************
217* Defined Constants And Macros *
218*********************************************************************************************************************************/
219/** @def IEM_WITH_SETJMP
220 * Enables alternative status code handling using setjmps.
221 *
222 * This adds a bit of expense via the setjmp() call since it saves all the
223 * non-volatile registers. However, it eliminates return code checks and allows
224 * for more optimal return value passing (return regs instead of stack buffer).
225 */
226#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
227# define IEM_WITH_SETJMP
228#endif
229
230/** Temporary hack to disable the double execution. Will be removed in favor
231 * of a dedicated execution mode in EM. */
232//#define IEM_VERIFICATION_MODE_NO_REM
233
234/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
235 * due to GCC lacking knowledge about the value range of a switch. */
236#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
237
238/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
240
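/*
 * Illustrative sketch, not part of the original file: typical use of the
 * default-case helpers above in a switch over an IEMMODE value.  The helper
 * name iemExampleCbOperand is hypothetical.
 */
DECLINLINE(uint32_t) iemExampleCbOperand(IEMMODE enmEffOpSize)
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT: return 2;
        case IEMMODE_32BIT: return 4;
        case IEMMODE_64BIT: return 8;
        /* Expands to: default: AssertFailedReturn(UINT32_MAX) */
        IEM_NOT_REACHED_DEFAULT_CASE_RET2(UINT32_MAX);
    }
}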
241/**
242 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
243 * occasion.
244 */
245#ifdef LOG_ENABLED
246# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
247 do { \
248 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
249 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
250 } while (0)
251#else
252# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
254#endif
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion using the supplied logger statement.
259 *
260 * @param a_LoggerArgs What to log on failure.
261 */
262#ifdef LOG_ENABLED
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 do { \
265 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
266 /*LogFunc(a_LoggerArgs);*/ \
267 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
268 } while (0)
269#else
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
272#endif
273
274/**
275 * Call an opcode decoder function.
276 *
277 * We're using macros for this so that adding and removing parameters can be
278 * done as we please. See FNIEMOP_DEF.
279 */
280#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
281
282/**
283 * Call a common opcode decoder function taking one extra argument.
284 *
285 * We're using macros for this so that adding and removing parameters can be
286 * done as we please. See FNIEMOP_DEF_1.
287 */
288#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
289
290/**
291 * Call a common opcode decoder function taking two extra arguments.
292 *
293 * We're using macros for this so that adding and removing parameters can be
294 * done as we please. See FNIEMOP_DEF_2.
295 */
296#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
297
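/*
 * Illustrative sketch, not part of the original file: how a decoder function
 * is declared with FNIEMOP_DEF and dispatched with FNIEMOP_CALL.  The names
 * iemOp_ExampleNotImpl and iemOp_ExampleDispatch are made up for the example;
 * the real decoders are defined elsewhere in IEM.
 */
FNIEMOP_DEF(iemOp_ExampleNotImpl)
{
    /* Decoders return a strict status code; unimplemented aspects bail out
       through the macro defined above, which also logs in debug builds. */
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example opcode is not implemented\n"));
}

FNIEMOP_DEF_1(iemOp_ExampleDispatch, uint8_t, bOpcode)
{
    /* A table of PFNIEMOP entries (such as g_apfnOneByteMap) would normally be
       indexed by the opcode byte; here we just forward to the stub above. */
    NOREF(bOpcode);
    return FNIEMOP_CALL(iemOp_ExampleNotImpl);
}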
298/**
299 * Check if we're currently executing in real or virtual 8086 mode.
300 *
301 * @returns @c true if it is, @c false if not.
302 * @param a_pVCpu The IEM state of the current CPU.
303 */
304#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
305
306/**
307 * Check if we're currently executing in virtual 8086 mode.
308 *
309 * @returns @c true if it is, @c false if not.
310 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
311 */
312#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
313
314/**
315 * Check if we're currently executing in long mode.
316 *
317 * @returns @c true if it is, @c false if not.
318 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
319 */
320#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
321
322/**
323 * Check if we're currently executing in real mode.
324 *
325 * @returns @c true if it is, @c false if not.
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
336
337/**
338 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
339 * @returns PCCPUMFEATURES
340 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
341 */
342#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
343
344/**
345 * Evaluates to true if we're presenting an Intel CPU to the guest.
346 */
347#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
348
349/**
350 * Evaluates to true if we're presenting an AMD CPU to the guest.
351 */
352#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
353
354/**
355 * Check if the address is canonical.
356 */
357#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
358
359/** @def IEM_USE_UNALIGNED_DATA_ACCESS
360 * Use unaligned accesses instead of elaborate byte assembly. */
361#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
362# define IEM_USE_UNALIGNED_DATA_ACCESS
363#endif
364
365
366/*********************************************************************************************************************************
367* Global Variables *
368*********************************************************************************************************************************/
369extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
370
371
372/** Function table for the ADD instruction. */
373IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
374{
375 iemAImpl_add_u8, iemAImpl_add_u8_locked,
376 iemAImpl_add_u16, iemAImpl_add_u16_locked,
377 iemAImpl_add_u32, iemAImpl_add_u32_locked,
378 iemAImpl_add_u64, iemAImpl_add_u64_locked
379};
380
381/** Function table for the ADC instruction. */
382IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
383{
384 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
385 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
386 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
387 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
388};
389
390/** Function table for the SUB instruction. */
391IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
392{
393 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
394 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
395 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
396 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
397};
398
399/** Function table for the SBB instruction. */
400IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
401{
402 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
403 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
404 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
405 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
406};
407
408/** Function table for the OR instruction. */
409IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
410{
411 iemAImpl_or_u8, iemAImpl_or_u8_locked,
412 iemAImpl_or_u16, iemAImpl_or_u16_locked,
413 iemAImpl_or_u32, iemAImpl_or_u32_locked,
414 iemAImpl_or_u64, iemAImpl_or_u64_locked
415};
416
417/** Function table for the XOR instruction. */
418IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
419{
420 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
421 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
422 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
423 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
424};
425
426/** Function table for the AND instruction. */
427IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
428{
429 iemAImpl_and_u8, iemAImpl_and_u8_locked,
430 iemAImpl_and_u16, iemAImpl_and_u16_locked,
431 iemAImpl_and_u32, iemAImpl_and_u32_locked,
432 iemAImpl_and_u64, iemAImpl_and_u64_locked
433};
434
435/** Function table for the CMP instruction.
436 * @remarks Making operand order ASSUMPTIONS.
437 */
438IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
439{
440 iemAImpl_cmp_u8, NULL,
441 iemAImpl_cmp_u16, NULL,
442 iemAImpl_cmp_u32, NULL,
443 iemAImpl_cmp_u64, NULL
444};
445
446/** Function table for the TEST instruction.
447 * @remarks Making operand order ASSUMPTIONS.
448 */
449IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
450{
451 iemAImpl_test_u8, NULL,
452 iemAImpl_test_u16, NULL,
453 iemAImpl_test_u32, NULL,
454 iemAImpl_test_u64, NULL
455};
456
457/** Function table for the BT instruction. */
458IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
459{
460 NULL, NULL,
461 iemAImpl_bt_u16, NULL,
462 iemAImpl_bt_u32, NULL,
463 iemAImpl_bt_u64, NULL
464};
465
466/** Function table for the BTC instruction. */
467IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
468{
469 NULL, NULL,
470 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
471 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
472 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
473};
474
475/** Function table for the BTR instruction. */
476IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
477{
478 NULL, NULL,
479 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
480 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
481 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
482};
483
484/** Function table for the BTS instruction. */
485IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
486{
487 NULL, NULL,
488 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
489 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
490 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
491};
492
493/** Function table for the BSF instruction. */
494IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
495{
496 NULL, NULL,
497 iemAImpl_bsf_u16, NULL,
498 iemAImpl_bsf_u32, NULL,
499 iemAImpl_bsf_u64, NULL
500};
501
502/** Function table for the BSR instruction. */
503IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
504{
505 NULL, NULL,
506 iemAImpl_bsr_u16, NULL,
507 iemAImpl_bsr_u32, NULL,
508 iemAImpl_bsr_u64, NULL
509};
510
511/** Function table for the two-operand form of the IMUL instruction. */
512IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
513{
514 NULL, NULL,
515 iemAImpl_imul_two_u16, NULL,
516 iemAImpl_imul_two_u32, NULL,
517 iemAImpl_imul_two_u64, NULL
518};
519
520/** Group 1 /r lookup table. */
521IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
522{
523 &g_iemAImpl_add,
524 &g_iemAImpl_or,
525 &g_iemAImpl_adc,
526 &g_iemAImpl_sbb,
527 &g_iemAImpl_and,
528 &g_iemAImpl_sub,
529 &g_iemAImpl_xor,
530 &g_iemAImpl_cmp
531};
532
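/*
 * Illustrative sketch, not part of the original file: how the group 1 table
 * above is indexed by the reg field of the ModR/M byte, selecting ADD thru
 * CMP.  The helper name is hypothetical and the X86_MODRM_REG_* constants
 * are assumed to be the usual iprt/x86.h ModR/M helpers.
 */
DECLINLINE(PCIEMOPBINSIZES) iemExampleLookupGrp1(uint8_t bRm)
{
    return g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
}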
533/** Function table for the INC instruction. */
534IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
535{
536 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
537 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
538 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
539 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
540};
541
542/** Function table for the DEC instruction. */
543IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
544{
545 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
546 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
547 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
548 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
549};
550
551/** Function table for the NEG instruction. */
552IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
553{
554 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
555 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
556 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
557 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
558};
559
560/** Function table for the NOT instruction. */
561IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
562{
563 iemAImpl_not_u8, iemAImpl_not_u8_locked,
564 iemAImpl_not_u16, iemAImpl_not_u16_locked,
565 iemAImpl_not_u32, iemAImpl_not_u32_locked,
566 iemAImpl_not_u64, iemAImpl_not_u64_locked
567};
568
569
570/** Function table for the ROL instruction. */
571IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
572{
573 iemAImpl_rol_u8,
574 iemAImpl_rol_u16,
575 iemAImpl_rol_u32,
576 iemAImpl_rol_u64
577};
578
579/** Function table for the ROR instruction. */
580IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
581{
582 iemAImpl_ror_u8,
583 iemAImpl_ror_u16,
584 iemAImpl_ror_u32,
585 iemAImpl_ror_u64
586};
587
588/** Function table for the RCL instruction. */
589IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
590{
591 iemAImpl_rcl_u8,
592 iemAImpl_rcl_u16,
593 iemAImpl_rcl_u32,
594 iemAImpl_rcl_u64
595};
596
597/** Function table for the RCR instruction. */
598IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
599{
600 iemAImpl_rcr_u8,
601 iemAImpl_rcr_u16,
602 iemAImpl_rcr_u32,
603 iemAImpl_rcr_u64
604};
605
606/** Function table for the SHL instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
608{
609 iemAImpl_shl_u8,
610 iemAImpl_shl_u16,
611 iemAImpl_shl_u32,
612 iemAImpl_shl_u64
613};
614
615/** Function table for the SHR instruction. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
617{
618 iemAImpl_shr_u8,
619 iemAImpl_shr_u16,
620 iemAImpl_shr_u32,
621 iemAImpl_shr_u64
622};
623
624/** Function table for the SAR instruction. */
625IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
626{
627 iemAImpl_sar_u8,
628 iemAImpl_sar_u16,
629 iemAImpl_sar_u32,
630 iemAImpl_sar_u64
631};
632
633
634/** Function table for the MUL instruction. */
635IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
636{
637 iemAImpl_mul_u8,
638 iemAImpl_mul_u16,
639 iemAImpl_mul_u32,
640 iemAImpl_mul_u64
641};
642
643/** Function table for the IMUL instruction working implicitly on rAX. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
645{
646 iemAImpl_imul_u8,
647 iemAImpl_imul_u16,
648 iemAImpl_imul_u32,
649 iemAImpl_imul_u64
650};
651
652/** Function table for the DIV instruction. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
654{
655 iemAImpl_div_u8,
656 iemAImpl_div_u16,
657 iemAImpl_div_u32,
658 iemAImpl_div_u64
659};
660
661/** Function table for the IDIV instruction. */
662IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
663{
664 iemAImpl_idiv_u8,
665 iemAImpl_idiv_u16,
666 iemAImpl_idiv_u32,
667 iemAImpl_idiv_u64
668};
669
670/** Function table for the SHLD instruction */
671IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
672{
673 iemAImpl_shld_u16,
674 iemAImpl_shld_u32,
675 iemAImpl_shld_u64,
676};
677
678/** Function table for the SHRD instruction */
679IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
680{
681 iemAImpl_shrd_u16,
682 iemAImpl_shrd_u32,
683 iemAImpl_shrd_u64,
684};
685
686
687/** Function table for the PUNPCKLBW instruction */
688IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
690/** Function table for the PUNPCKLWD instruction */
690IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
691/** Function table for the PUNPCKLDQ instruction */
692IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
693/** Function table for the PUNPCKLQDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
695
696/** Function table for the PUNPCKHBW instruction */
697IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
698/** Function table for the PUNPCKHWD instruction */
699IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
700/** Function table for the PUNPCKHDQ instruction */
701IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
702/** Function table for the PUNPCKHQDQ instruction */
703IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
704
705/** Function table for the PXOR instruction */
706IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
707/** Function table for the PCMPEQB instruction */
708IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
709/** Function table for the PCMPEQW instruction */
710IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
711/** Function table for the PCMPEQD instruction */
712IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
713
714
715#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
716/** What IEM just wrote. */
717uint8_t g_abIemWrote[256];
718/** How much IEM just wrote. */
719size_t g_cbIemWrote;
720#endif
721
722
723/*********************************************************************************************************************************
724* Internal Functions *
725*********************************************************************************************************************************/
726IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
727IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
728IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
729IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
730/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
733IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
734IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
735IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
736IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
737IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
738IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
740IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
742IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
743#ifdef IEM_WITH_SETJMP
744DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
745DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
746DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
748DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
749#endif
750
751IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
752IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
754IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
755IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
756IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
757IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
758IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
759IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
760IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
761IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
762IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
763IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
764IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
765IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
766IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
767
768#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
769IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
770#endif
771IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
772IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
773
774
775
776/**
777 * Sets the pass up status.
778 *
779 * @returns VINF_SUCCESS.
780 * @param pVCpu The cross context virtual CPU structure of the
781 * calling thread.
782 * @param rcPassUp The pass up status. Must be informational.
783 * VINF_SUCCESS is not allowed.
784 */
785IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
786{
787 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
788
789 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
790 if (rcOldPassUp == VINF_SUCCESS)
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 /* If both are EM scheduling codes, use EM priority rules. */
793 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
794 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
795 {
796 if (rcPassUp < rcOldPassUp)
797 {
798 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
799 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
800 }
801 else
802 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
803 }
804 /* Override EM scheduling with specific status code. */
805 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
806 {
807 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
808 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
809 }
810 /* Don't override specific status code, first come first served. */
811 else
812 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Calculates the CPU mode.
819 *
820 * This is mainly for updating IEMCPU::enmCpuMode.
821 *
822 * @returns CPU mode.
823 * @param pCtx The register context for the CPU.
824 */
825DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
826{
827 if (CPUMIsGuestIn64BitCodeEx(pCtx))
828 return IEMMODE_64BIT;
829 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
830 return IEMMODE_32BIT;
831 return IEMMODE_16BIT;
832}
833
834
835/**
836 * Initializes the execution state.
837 *
838 * @param pVCpu The cross context virtual CPU structure of the
839 * calling thread.
840 * @param fBypassHandlers Whether to bypass access handlers.
841 *
842 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
843 * side-effects in strict builds.
844 */
845DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
846{
847 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
848
849 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
850
851#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
854 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
855 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
856 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
858 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
859 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
860#endif
861
862#ifdef VBOX_WITH_RAW_MODE_NOT_R0
863 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
864#endif
865 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
866 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
867#ifdef VBOX_STRICT
868 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
869 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
870 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
871 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
872 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
873 pVCpu->iem.s.uRexReg = 127;
874 pVCpu->iem.s.uRexB = 127;
875 pVCpu->iem.s.uRexIndex = 127;
876 pVCpu->iem.s.iEffSeg = 127;
877 pVCpu->iem.s.idxPrefix = 127;
878 pVCpu->iem.s.uVex3rdReg = 127;
879 pVCpu->iem.s.uVexLength = 127;
880 pVCpu->iem.s.fEvexStuff = 127;
881 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
882# ifdef IEM_WITH_CODE_TLB
883 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
884 pVCpu->iem.s.pbInstrBuf = NULL;
885 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
886 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
887 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
888 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
889# else
890 pVCpu->iem.s.offOpcode = 127;
891 pVCpu->iem.s.cbOpcode = 127;
892# endif
893#endif
894
895 pVCpu->iem.s.cActiveMappings = 0;
896 pVCpu->iem.s.iNextMapping = 0;
897 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
898 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
899#ifdef VBOX_WITH_RAW_MODE_NOT_R0
900 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
901 && pCtx->cs.u64Base == 0
902 && pCtx->cs.u32Limit == UINT32_MAX
903 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
904 if (!pVCpu->iem.s.fInPatchCode)
905 CPUMRawLeave(pVCpu, VINF_SUCCESS);
906#endif
907
908#ifdef IEM_VERIFICATION_MODE_FULL
909 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
910 pVCpu->iem.s.fNoRem = true;
911#endif
912}
913
914
915/**
916 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
917 *
918 * @param pVCpu The cross context virtual CPU structure of the
919 * calling thread.
920 */
921DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
922{
923 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
924#ifdef IEM_VERIFICATION_MODE_FULL
925 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
926#endif
927#ifdef VBOX_STRICT
928# ifdef IEM_WITH_CODE_TLB
929 NOREF(pVCpu);
930# else
931 pVCpu->iem.s.cbOpcode = 0;
932# endif
933#else
934 NOREF(pVCpu);
935#endif
936}
937
938
939/**
940 * Initializes the decoder state.
941 *
942 * iemReInitDecoder is mostly a copy of this function.
943 *
944 * @param pVCpu The cross context virtual CPU structure of the
945 * calling thread.
946 * @param fBypassHandlers Whether to bypass access handlers.
947 */
948DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
949{
950 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
951
952 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
953
954#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
955 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
956 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
957 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
959 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
960 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
961 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
962 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
963#endif
964
965#ifdef VBOX_WITH_RAW_MODE_NOT_R0
966 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
967#endif
968 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
969#ifdef IEM_VERIFICATION_MODE_FULL
970 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
971 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
972#endif
973 IEMMODE enmMode = iemCalcCpuMode(pCtx);
974 pVCpu->iem.s.enmCpuMode = enmMode;
975 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
976 pVCpu->iem.s.enmEffAddrMode = enmMode;
977 if (enmMode != IEMMODE_64BIT)
978 {
979 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
980 pVCpu->iem.s.enmEffOpSize = enmMode;
981 }
982 else
983 {
984 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
985 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
986 }
987 pVCpu->iem.s.fPrefixes = 0;
988 pVCpu->iem.s.uRexReg = 0;
989 pVCpu->iem.s.uRexB = 0;
990 pVCpu->iem.s.uRexIndex = 0;
991 pVCpu->iem.s.idxPrefix = 0;
992 pVCpu->iem.s.uVex3rdReg = 0;
993 pVCpu->iem.s.uVexLength = 0;
994 pVCpu->iem.s.fEvexStuff = 0;
995 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
996#ifdef IEM_WITH_CODE_TLB
997 pVCpu->iem.s.pbInstrBuf = NULL;
998 pVCpu->iem.s.offInstrNextByte = 0;
999 pVCpu->iem.s.offCurInstrStart = 0;
1000# ifdef VBOX_STRICT
1001 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1002 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1003 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1004# endif
1005#else
1006 pVCpu->iem.s.offOpcode = 0;
1007 pVCpu->iem.s.cbOpcode = 0;
1008#endif
1009 pVCpu->iem.s.cActiveMappings = 0;
1010 pVCpu->iem.s.iNextMapping = 0;
1011 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1012 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1013#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1014 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1015 && pCtx->cs.u64Base == 0
1016 && pCtx->cs.u32Limit == UINT32_MAX
1017 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1018 if (!pVCpu->iem.s.fInPatchCode)
1019 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1020#endif
1021
1022#ifdef DBGFTRACE_ENABLED
1023 switch (enmMode)
1024 {
1025 case IEMMODE_64BIT:
1026 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1027 break;
1028 case IEMMODE_32BIT:
1029 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1030 break;
1031 case IEMMODE_16BIT:
1032 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1033 break;
1034 }
1035#endif
1036}
1037
1038
1039/**
1040 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1041 *
1042 * This is mostly a copy of iemInitDecoder.
1043 *
1044 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1045 */
1046DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1047{
1048 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1049
1050 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1051
1052#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1053 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1054 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1055 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1056 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1057 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1058 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1059 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1060 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1061#endif
1062
1063 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1064#ifdef IEM_VERIFICATION_MODE_FULL
1065 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1066 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1067#endif
1068 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1069 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1070 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1071 pVCpu->iem.s.enmEffAddrMode = enmMode;
1072 if (enmMode != IEMMODE_64BIT)
1073 {
1074 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1075 pVCpu->iem.s.enmEffOpSize = enmMode;
1076 }
1077 else
1078 {
1079 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1080 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1081 }
1082 pVCpu->iem.s.fPrefixes = 0;
1083 pVCpu->iem.s.uRexReg = 0;
1084 pVCpu->iem.s.uRexB = 0;
1085 pVCpu->iem.s.uRexIndex = 0;
1086 pVCpu->iem.s.idxPrefix = 0;
1087 pVCpu->iem.s.uVex3rdReg = 0;
1088 pVCpu->iem.s.uVexLength = 0;
1089 pVCpu->iem.s.fEvexStuff = 0;
1090 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1091#ifdef IEM_WITH_CODE_TLB
1092 if (pVCpu->iem.s.pbInstrBuf)
1093 {
1094 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1095 - pVCpu->iem.s.uInstrBufPc;
1096 if (off < pVCpu->iem.s.cbInstrBufTotal)
1097 {
1098 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1099 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1100 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1101 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1102 else
1103 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1104 }
1105 else
1106 {
1107 pVCpu->iem.s.pbInstrBuf = NULL;
1108 pVCpu->iem.s.offInstrNextByte = 0;
1109 pVCpu->iem.s.offCurInstrStart = 0;
1110 pVCpu->iem.s.cbInstrBuf = 0;
1111 pVCpu->iem.s.cbInstrBufTotal = 0;
1112 }
1113 }
1114 else
1115 {
1116 pVCpu->iem.s.offInstrNextByte = 0;
1117 pVCpu->iem.s.offCurInstrStart = 0;
1118 pVCpu->iem.s.cbInstrBuf = 0;
1119 pVCpu->iem.s.cbInstrBufTotal = 0;
1120 }
1121#else
1122 pVCpu->iem.s.cbOpcode = 0;
1123 pVCpu->iem.s.offOpcode = 0;
1124#endif
1125 Assert(pVCpu->iem.s.cActiveMappings == 0);
1126 pVCpu->iem.s.iNextMapping = 0;
1127 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1128 Assert(pVCpu->iem.s.fBypassHandlers == false);
1129#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1130 if (!pVCpu->iem.s.fInPatchCode)
1131 { /* likely */ }
1132 else
1133 {
1134 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1135 && pCtx->cs.u64Base == 0
1136 && pCtx->cs.u32Limit == UINT32_MAX
1137 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1138 if (!pVCpu->iem.s.fInPatchCode)
1139 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1140 }
1141#endif
1142
1143#ifdef DBGFTRACE_ENABLED
1144 switch (enmMode)
1145 {
1146 case IEMMODE_64BIT:
1147 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1148 break;
1149 case IEMMODE_32BIT:
1150 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1151 break;
1152 case IEMMODE_16BIT:
1153 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1154 break;
1155 }
1156#endif
1157}
1158
1159
1160
1161/**
1162 * Prefetches opcodes the first time when starting execution.
1163 *
1164 * @returns Strict VBox status code.
1165 * @param pVCpu The cross context virtual CPU structure of the
1166 * calling thread.
1167 * @param fBypassHandlers Whether to bypass access handlers.
1168 */
1169IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1170{
1171#ifdef IEM_VERIFICATION_MODE_FULL
1172 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1173#endif
1174 iemInitDecoder(pVCpu, fBypassHandlers);
1175
1176#ifdef IEM_WITH_CODE_TLB
1177 /** @todo Do ITLB lookup here. */
1178
1179#else /* !IEM_WITH_CODE_TLB */
1180
1181 /*
1182 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1183 *
1184 * First translate CS:rIP to a physical address.
1185 */
1186 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1187 uint32_t cbToTryRead;
1188 RTGCPTR GCPtrPC;
1189 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1190 {
1191 cbToTryRead = PAGE_SIZE;
1192 GCPtrPC = pCtx->rip;
1193 if (IEM_IS_CANONICAL(GCPtrPC))
1194 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1195 else
1196 return iemRaiseGeneralProtectionFault0(pVCpu);
1197 }
1198 else
1199 {
1200 uint32_t GCPtrPC32 = pCtx->eip;
1201 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1202 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1203 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1204 else
1205 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1206 if (cbToTryRead) { /* likely */ }
1207 else /* overflowed */
1208 {
1209 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1210 cbToTryRead = UINT32_MAX;
1211 }
1212 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1213 Assert(GCPtrPC <= UINT32_MAX);
1214 }
1215
1216# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1217 /* Allow interpretation of patch manager code blocks since they can for
1218 instance throw #PFs for perfectly good reasons. */
1219 if (pVCpu->iem.s.fInPatchCode)
1220 {
1221 size_t cbRead = 0;
1222 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1223 AssertRCReturn(rc, rc);
1224 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1225 return VINF_SUCCESS;
1226 }
1227# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1228
1229 RTGCPHYS GCPhys;
1230 uint64_t fFlags;
1231 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1232 if (RT_SUCCESS(rc)) { /* probable */ }
1233 else
1234 {
1235 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1236 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1237 }
1238 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1239 else
1240 {
1241 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1242 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1243 }
1244 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1245 else
1246 {
1247 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1248 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1249 }
1250 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1251 /** @todo Check reserved bits and such stuff. PGM is better at doing
1252 * that, so do it when implementing the guest virtual address
1253 * TLB... */
1254
1255# ifdef IEM_VERIFICATION_MODE_FULL
1256 /*
1257 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1258 * instruction.
1259 */
1260 /** @todo optimize this differently by not using PGMPhysRead. */
1261 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1262 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1263 if ( offPrevOpcodes < cbOldOpcodes
1264 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1265 {
1266 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1267 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1268 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1269 pVCpu->iem.s.cbOpcode = cbNew;
1270 return VINF_SUCCESS;
1271 }
1272# endif
1273
1274 /*
1275 * Read the bytes at this address.
1276 */
1277 PVM pVM = pVCpu->CTX_SUFF(pVM);
1278# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1279 size_t cbActual;
1280 if ( PATMIsEnabled(pVM)
1281 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1282 {
1283 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1284 Assert(cbActual > 0);
1285 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1286 }
1287 else
1288# endif
1289 {
1290 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1291 if (cbToTryRead > cbLeftOnPage)
1292 cbToTryRead = cbLeftOnPage;
1293 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1294 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1295
1296 if (!pVCpu->iem.s.fBypassHandlers)
1297 {
1298 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1299 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1300 { /* likely */ }
1301 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1302 {
1303 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1304 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1305 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1306 }
1307 else
1308 {
1309 Log((RT_SUCCESS(rcStrict)
1310 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1311 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1312 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1313 return rcStrict;
1314 }
1315 }
1316 else
1317 {
1318 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1319 if (RT_SUCCESS(rc))
1320 { /* likely */ }
1321 else
1322 {
1323 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1324 GCPtrPC, GCPhys, cbToTryRead, rc));
1325 return rc;
1326 }
1327 }
1328 pVCpu->iem.s.cbOpcode = cbToTryRead;
1329 }
1330#endif /* !IEM_WITH_CODE_TLB */
1331 return VINF_SUCCESS;
1332}
1333
1334
1335/**
1336 * Invalidates the IEM TLBs.
1337 *
1338 * This is called internally as well as by PGM when moving GC mappings.
1339 *
1340 *
1341 * @param pVCpu The cross context virtual CPU structure of the calling
1342 * thread.
1343 * @param fVmm Set when PGM calls us with a remapping.
1344 */
1345VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1346{
1347#ifdef IEM_WITH_CODE_TLB
1348 pVCpu->iem.s.cbInstrBufTotal = 0;
1349 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1350 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1351 { /* very likely */ }
1352 else
1353 {
1354 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1355 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1356 while (i-- > 0)
1357 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1358 }
1359#endif
1360
1361#ifdef IEM_WITH_DATA_TLB
1362 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1363 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1364 { /* very likely */ }
1365 else
1366 {
1367 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1368 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1369 while (i-- > 0)
1370 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1371 }
1372#endif
1373 NOREF(pVCpu); NOREF(fVmm);
1374}
1375
1376
1377/**
1378 * Invalidates a page in the TLBs.
1379 *
1380 * @param pVCpu The cross context virtual CPU structure of the calling
1381 * thread.
1382 * @param GCPtr The address of the page to invalidate
1383 */
1384VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1385{
1386#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1387 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1388 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1389 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1390 uintptr_t idx = (uint8_t)GCPtr;
1391
1392# ifdef IEM_WITH_CODE_TLB
1393 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1394 {
1395 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1396 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1397 pVCpu->iem.s.cbInstrBufTotal = 0;
1398 }
1399# endif
1400
1401# ifdef IEM_WITH_DATA_TLB
1402 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1403 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1404# endif
1405#else
1406 NOREF(pVCpu); NOREF(GCPtr);
1407#endif
1408}
1409
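#ifdef IEM_WITH_CODE_TLB
/*
 * Illustrative sketch, not part of the original file: the tag/index scheme
 * the invalidation code above relies on.  The linear address is reduced to a
 * page number, its low 8 bits pick one of the 256 entries, and the current
 * TLB revision is folded into the tag so that bumping the revision (see
 * IEMTlbInvalidateAll) instantly invalidates every older entry.  The helper
 * name is hypothetical.
 */
DECLINLINE(bool) iemExampleCodeTlbHit(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
    return pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag].uTag == uTag;
}
#endif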
1410
1411/**
1412 * Invalidates the host physical aspects of the IEM TLBs.
1413 *
1414 * This is called internally as well as by PGM when moving GC mappings.
1415 *
1416 * @param pVCpu The cross context virtual CPU structure of the calling
1417 * thread.
1418 */
1419VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1420{
1421#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1422 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1423
1424# ifdef IEM_WITH_CODE_TLB
1425 pVCpu->iem.s.cbInstrBufTotal = 0;
1426# endif
1427 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1428 if (uTlbPhysRev != 0)
1429 {
1430 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1431 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1432 }
1433 else
1434 {
1435 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1436 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1437
1438 unsigned i;
1439# ifdef IEM_WITH_CODE_TLB
1440 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1441 while (i-- > 0)
1442 {
1443 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1444 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1445 }
1446# endif
1447# ifdef IEM_WITH_DATA_TLB
1448 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1449 while (i-- > 0)
1450 {
1451 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1452 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1453 }
1454# endif
1455 }
1456#else
1457 NOREF(pVCpu);
1458#endif
1459}
1460
1461
1462/**
1463 * Invalidates the host physical aspects of the IEM TLBs.
1464 *
1465 * This is called internally as well as by PGM when moving GC mappings.
1466 *
1467 * @param pVM The cross context VM structure.
1468 *
1469 * @remarks Caller holds the PGM lock.
1470 */
1471VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1472{
1473 RT_NOREF_PV(pVM);
1474}
1475
1476#ifdef IEM_WITH_CODE_TLB
1477
1478/**
1479 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1480 * failure and jumps.
1481 *
1482 * We end up here for a number of reasons:
1483 * - pbInstrBuf isn't yet initialized.
1484 * - Advancing beyond the buffer boundary (e.g. cross page).
1485 * - Advancing beyond the CS segment limit.
1486 * - Fetching from non-mappable page (e.g. MMIO).
1487 *
1488 * @param pVCpu The cross context virtual CPU structure of the
1489 * calling thread.
1490 * @param pvDst Where to return the bytes.
1491 * @param cbDst Number of bytes to read.
1492 *
1493 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1494 */
1495IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1496{
1497#ifdef IN_RING3
1498//__debugbreak();
1499 for (;;)
1500 {
1501 Assert(cbDst <= 8);
1502 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1503
1504 /*
1505 * We might have a partial buffer match, deal with that first to make the
1506 * rest simpler. This is the first part of the cross page/buffer case.
1507 */
1508 if (pVCpu->iem.s.pbInstrBuf != NULL)
1509 {
1510 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1511 {
1512 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1513 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1514 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1515
1516 cbDst -= cbCopy;
1517 pvDst = (uint8_t *)pvDst + cbCopy;
1518 offBuf += cbCopy;
1519 pVCpu->iem.s.offInstrNextByte = offBuf;
1520 }
1521 }
1522
1523 /*
1524 * Check segment limit, figuring how much we're allowed to access at this point.
1525 *
1526 * We will fault immediately if RIP is past the segment limit / in non-canonical
1527 * territory. If we do continue, there are one or more bytes to read before we
1528 * end up in trouble and we need to do that first before faulting.
1529 */
1530 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1531 RTGCPTR GCPtrFirst;
1532 uint32_t cbMaxRead;
1533 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1534 {
1535 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1536 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1537 { /* likely */ }
1538 else
1539 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1540 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1541 }
1542 else
1543 {
1544 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1545 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1546 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1547 { /* likely */ }
1548 else
1549 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1550 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1551 if (cbMaxRead != 0)
1552 { /* likely */ }
1553 else
1554 {
1555 /* Overflowed because address is 0 and limit is max. */
1556 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1557 cbMaxRead = X86_PAGE_SIZE;
1558 }
1559 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1560 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1561 if (cbMaxRead2 < cbMaxRead)
1562 cbMaxRead = cbMaxRead2;
1563 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1564 }
1565
1566 /*
1567 * Get the TLB entry for this piece of code.
1568 */
1569 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1570 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1571 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1572 if (pTlbe->uTag == uTag)
1573 {
1574 /* likely when executing lots of code, otherwise unlikely */
1575# ifdef VBOX_WITH_STATISTICS
1576 pVCpu->iem.s.CodeTlb.cTlbHits++;
1577# endif
1578 }
1579 else
1580 {
1581 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1582# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1583 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1584 {
1585 pTlbe->uTag = uTag;
1586 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1587 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1588 pTlbe->GCPhys = NIL_RTGCPHYS;
1589 pTlbe->pbMappingR3 = NULL;
1590 }
1591 else
1592# endif
1593 {
1594 RTGCPHYS GCPhys;
1595 uint64_t fFlags;
1596 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1597 if (RT_FAILURE(rc))
1598 {
1599 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1600 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1601 }
1602
1603 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1604 pTlbe->uTag = uTag;
1605 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1606 pTlbe->GCPhys = GCPhys;
1607 pTlbe->pbMappingR3 = NULL;
1608 }
1609 }
1610
1611 /*
1612 * Check TLB page table level access flags.
1613 */
1614 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1615 {
1616 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1617 {
1618 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1619 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1620 }
1621 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1622 {
1623 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1624 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1625 }
1626 }
1627
1628# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1629 /*
1630 * Allow interpretation of patch manager code blocks since they can for
1631 * instance throw #PFs for perfectly good reasons.
1632 */
1633 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1634 { /* likely */ }
1635 else
1636 {
1637 /** @todo This could be optimized a little in ring-3 if we liked. */
1638 size_t cbRead = 0;
1639 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1640 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1641 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1642 return;
1643 }
1644# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1645
1646 /*
1647 * Look up the physical page info if necessary.
1648 */
1649 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1650 { /* not necessary */ }
1651 else
1652 {
1653 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1654 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1655 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1656 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1657 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1658 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1659 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1660 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1661 }
1662
1663# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1664 /*
1665 * Try do a direct read using the pbMappingR3 pointer.
1666 */
1667 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1668 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1669 {
1670 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1671 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1672 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1673 {
1674 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1675 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1676 }
1677 else
1678 {
1679 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1680 Assert(cbInstr < cbMaxRead);
1681 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1682 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1683 }
1684 if (cbDst <= cbMaxRead)
1685 {
1686 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1687 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1688 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1689 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1690 return;
1691 }
1692 pVCpu->iem.s.pbInstrBuf = NULL;
1693
1694 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1695 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1696 }
1697 else
1698# endif
1699#if 0
1700 /*
1701 * If there is no special read handling, we can read a bit more and
1702 * put it in the prefetch buffer.
1703 */
1704 if ( cbDst < cbMaxRead
1705 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1706 {
1707 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1708 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1709 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1710 { /* likely */ }
1711 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1712 {
1713 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1714 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1715 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1716 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1717 }
1718 else
1719 {
1720 Log((RT_SUCCESS(rcStrict)
1721 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1722 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1723 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1724 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1725 }
1726 }
1727 /*
1728 * Special read handling, so only read exactly what's needed.
1729 * This is a highly unlikely scenario.
1730 */
1731 else
1732#endif
1733 {
1734 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1735 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1736 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1737 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1738 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1739 { /* likely */ }
1740 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1741 {
1742 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1743 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1744 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1745 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1746 }
1747 else
1748 {
1749 Log((RT_SUCCESS(rcStrict)
1750 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1751 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1752 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1753 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1754 }
1755 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1756 if (cbToRead == cbDst)
1757 return;
1758 }
1759
1760 /*
1761 * More to read, loop.
1762 */
1763 cbDst -= cbMaxRead;
1764 pvDst = (uint8_t *)pvDst + cbMaxRead;
1765 }
1766#else
1767 RT_NOREF(pvDst, cbDst);
1768 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1769#endif
1770}
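/*
 * Worked example (illustrative, values assumed): fetching a 4 byte immediate
 * whose first byte sits at page offset 0xffe of a 4 KiB page.  The first loop
 * iteration is capped at cbMaxRead = X86_PAGE_SIZE - 0xffe = 2, so only two
 * bytes are copied and the tail of the loop advances pvDst/cbDst.  The second
 * iteration translates the following page, reads the remaining two bytes and
 * returns.  Because the canonical/limit checks sit at the top of the loop,
 * crossing into inaccessible territory still raises the proper exception.
 */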
1771
1772#else
1773
1774/**
1775 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1776 * exception if it fails.
1777 *
1778 * @returns Strict VBox status code.
1779 * @param pVCpu The cross context virtual CPU structure of the
1780 * calling thread.
1781 * @param cbMin The minimum number of bytes relative to offOpcode
1782 * that must be read.
1783 */
1784IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1785{
1786 /*
1787 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1788 *
1789 * First translate CS:rIP to a physical address.
1790 */
1791 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1792 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1793 uint32_t cbToTryRead;
1794 RTGCPTR GCPtrNext;
1795 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1796 {
1797 cbToTryRead = PAGE_SIZE;
1798 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1799 if (!IEM_IS_CANONICAL(GCPtrNext))
1800 return iemRaiseGeneralProtectionFault0(pVCpu);
1801 }
1802 else
1803 {
1804 uint32_t GCPtrNext32 = pCtx->eip;
1805 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1806 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1807 if (GCPtrNext32 > pCtx->cs.u32Limit)
1808 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1809 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1810 if (!cbToTryRead) /* overflowed */
1811 {
1812 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1813 cbToTryRead = UINT32_MAX;
1814 /** @todo check out wrapping around the code segment. */
1815 }
1816 if (cbToTryRead < cbMin - cbLeft)
1817 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1818 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1819 }
1820
1821 /* Only read up to the end of the page, and make sure we don't read more
1822 than the opcode buffer can hold. */
1823 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1824 if (cbToTryRead > cbLeftOnPage)
1825 cbToTryRead = cbLeftOnPage;
1826 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1827 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1828/** @todo r=bird: Convert assertion into undefined opcode exception? */
1829 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1830
1831# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1832 /* Allow interpretation of patch manager code blocks since they can for
1833 instance throw #PFs for perfectly good reasons. */
1834 if (pVCpu->iem.s.fInPatchCode)
1835 {
1836 size_t cbRead = 0;
1837 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1838 AssertRCReturn(rc, rc);
1839 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1840 return VINF_SUCCESS;
1841 }
1842# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1843
1844 RTGCPHYS GCPhys;
1845 uint64_t fFlags;
1846 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1847 if (RT_FAILURE(rc))
1848 {
1849 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1850 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1851 }
1852 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1853 {
1854 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1855 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1856 }
1857 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1858 {
1859 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1860 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1861 }
1862 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1863 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1864 /** @todo Check reserved bits and such stuff. PGM is better at doing
1865 * that, so do it when implementing the guest virtual address
1866 * TLB... */
1867
1868 /*
1869 * Read the bytes at this address.
1870 *
1871 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1872 * and since PATM should only patch the start of an instruction there
1873 * should be no need to check again here.
1874 */
1875 if (!pVCpu->iem.s.fBypassHandlers)
1876 {
1877 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1878 cbToTryRead, PGMACCESSORIGIN_IEM);
1879 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1880 { /* likely */ }
1881 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1882 {
1883 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1884 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1885 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1886 }
1887 else
1888 {
1889 Log((RT_SUCCESS(rcStrict)
1890 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1891 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1892 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1893 return rcStrict;
1894 }
1895 }
1896 else
1897 {
1898 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1899 if (RT_SUCCESS(rc))
1900 { /* likely */ }
1901 else
1902 {
1903 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1904 return rc;
1905 }
1906 }
1907 pVCpu->iem.s.cbOpcode += cbToTryRead;
1908 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1909
1910 return VINF_SUCCESS;
1911}
1912
1913#endif /* !IEM_WITH_CODE_TLB */
1914#ifndef IEM_WITH_SETJMP
1915
1916/**
1917 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1918 *
1919 * @returns Strict VBox status code.
1920 * @param pVCpu The cross context virtual CPU structure of the
1921 * calling thread.
1922 * @param pb Where to return the opcode byte.
1923 */
1924DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1925{
1926 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1927 if (rcStrict == VINF_SUCCESS)
1928 {
1929 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1930 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1931 pVCpu->iem.s.offOpcode = offOpcode + 1;
1932 }
1933 else
1934 *pb = 0;
1935 return rcStrict;
1936}
1937
1938
1939/**
1940 * Fetches the next opcode byte.
1941 *
1942 * @returns Strict VBox status code.
1943 * @param pVCpu The cross context virtual CPU structure of the
1944 * calling thread.
1945 * @param pu8 Where to return the opcode byte.
1946 */
1947DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1948{
1949 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1950 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1951 {
1952 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1953 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1954 return VINF_SUCCESS;
1955 }
1956 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1957}
1958
1959#else /* IEM_WITH_SETJMP */
1960
1961/**
1962 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1963 *
1964 * @returns The opcode byte.
1965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1966 */
1967DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1968{
1969# ifdef IEM_WITH_CODE_TLB
1970 uint8_t u8;
1971 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1972 return u8;
1973# else
1974 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1975 if (rcStrict == VINF_SUCCESS)
1976 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1977 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1978# endif
1979}
1980
1981
1982/**
1983 * Fetches the next opcode byte, longjmp on error.
1984 *
1985 * @returns The opcode byte.
1986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1987 */
1988DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1989{
1990# ifdef IEM_WITH_CODE_TLB
1991 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1992 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1993 if (RT_LIKELY( pbBuf != NULL
1994 && offBuf < pVCpu->iem.s.cbInstrBuf))
1995 {
1996 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1997 return pbBuf[offBuf];
1998 }
1999# else
2000 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2001 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2002 {
2003 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2004 return pVCpu->iem.s.abOpcode[offOpcode];
2005 }
2006# endif
2007 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2008}
2009
2010#endif /* IEM_WITH_SETJMP */
2011
2012/**
2013 * Fetches the next opcode byte, returns automatically on failure.
2014 *
2015 * @param a_pu8 Where to return the opcode byte.
2016 * @remark Implicitly references pVCpu.
2017 */
2018#ifndef IEM_WITH_SETJMP
2019# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2020 do \
2021 { \
2022 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2023 if (rcStrict2 == VINF_SUCCESS) \
2024 { /* likely */ } \
2025 else \
2026 return rcStrict2; \
2027 } while (0)
2028#else
2029# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2030#endif /* IEM_WITH_SETJMP */
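/*
 * Usage sketch (illustrative only): decoder code fetches opcode bytes through
 * the macro so the same source works in both build modes.  In the status code
 * build the macro does 'return rcStrict2' on failure, so it may only be used
 * in functions returning VBOXSTRICTRC; in the setjmp build it longjmps instead.
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_U8(&bRm);      // returns / longjmps on fetch failure
 *     // ... decode the ModR/M byte held in bRm ...
 */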
2031
2032
2033#ifndef IEM_WITH_SETJMP
2034/**
2035 * Fetches the next signed byte from the opcode stream.
2036 *
2037 * @returns Strict VBox status code.
2038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2039 * @param pi8 Where to return the signed byte.
2040 */
2041DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2042{
2043 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2044}
2045#endif /* !IEM_WITH_SETJMP */
2046
2047
2048/**
2049 * Fetches the next signed byte from the opcode stream, returning automatically
2050 * on failure.
2051 *
2052 * @param a_pi8 Where to return the signed byte.
2053 * @remark Implicitly references pVCpu.
2054 */
2055#ifndef IEM_WITH_SETJMP
2056# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2057 do \
2058 { \
2059 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2060 if (rcStrict2 != VINF_SUCCESS) \
2061 return rcStrict2; \
2062 } while (0)
2063#else /* IEM_WITH_SETJMP */
2064# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2065
2066#endif /* IEM_WITH_SETJMP */
2067
2068#ifndef IEM_WITH_SETJMP
2069
2070/**
2071 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2072 *
2073 * @returns Strict VBox status code.
2074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2075 * @param pu16 Where to return the opcode word.
2076 */
2077DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2078{
2079 uint8_t u8;
2080 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2081 if (rcStrict == VINF_SUCCESS)
2082 *pu16 = (int8_t)u8;
2083 return rcStrict;
2084}
2085
2086
2087/**
2088 * Fetches the next signed byte from the opcode stream, extending it to
2089 * unsigned 16-bit.
2090 *
2091 * @returns Strict VBox status code.
2092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2093 * @param pu16 Where to return the unsigned word.
2094 */
2095DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2096{
2097 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2098 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2099 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2100
2101 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2102 pVCpu->iem.s.offOpcode = offOpcode + 1;
2103 return VINF_SUCCESS;
2104}
2105
2106#endif /* !IEM_WITH_SETJMP */
2107
2108/**
2109 * Fetches the next signed byte from the opcode stream and sign extends it to
2110 * a word, returning automatically on failure.
2111 *
2112 * @param a_pu16 Where to return the word.
2113 * @remark Implicitly references pVCpu.
2114 */
2115#ifndef IEM_WITH_SETJMP
2116# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2117 do \
2118 { \
2119 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2120 if (rcStrict2 != VINF_SUCCESS) \
2121 return rcStrict2; \
2122 } while (0)
2123#else
2124# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2125#endif
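/*
 * Example (illustrative): an opcode byte of 0x80 is the signed value -128, so
 * IEM_OPCODE_GET_NEXT_S8_SX_U16 stores 0xff80 in *a_pu16, while 0x7f yields
 * 0x007f.  This is exactly what the (int8_t) cast in both variants produces.
 */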
2126
2127#ifndef IEM_WITH_SETJMP
2128
2129/**
2130 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2131 *
2132 * @returns Strict VBox status code.
2133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2134 * @param pu32 Where to return the opcode dword.
2135 */
2136DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2137{
2138 uint8_t u8;
2139 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2140 if (rcStrict == VINF_SUCCESS)
2141 *pu32 = (int8_t)u8;
2142 return rcStrict;
2143}
2144
2145
2146/**
2147 * Fetches the next signed byte from the opcode stream, extending it to
2148 * unsigned 32-bit.
2149 *
2150 * @returns Strict VBox status code.
2151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2152 * @param pu32 Where to return the unsigned dword.
2153 */
2154DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2155{
2156 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2157 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2158 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2159
2160 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2161 pVCpu->iem.s.offOpcode = offOpcode + 1;
2162 return VINF_SUCCESS;
2163}
2164
2165#endif /* !IEM_WITH_SETJMP */
2166
2167/**
2168 * Fetches the next signed byte from the opcode stream and sign extends it to
2169 * a double word, returning automatically on failure.
2170 *
2171 * @param a_pu32 Where to return the double word.
2172 * @remark Implicitly references pVCpu.
2173 */
2174#ifndef IEM_WITH_SETJMP
2175# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2176 do \
2177 { \
2178 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2179 if (rcStrict2 != VINF_SUCCESS) \
2180 return rcStrict2; \
2181 } while (0)
2182#else
2183# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2184#endif
2185
2186#ifndef IEM_WITH_SETJMP
2187
2188/**
2189 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2190 *
2191 * @returns Strict VBox status code.
2192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2193 * @param pu64 Where to return the opcode qword.
2194 */
2195DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2196{
2197 uint8_t u8;
2198 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2199 if (rcStrict == VINF_SUCCESS)
2200 *pu64 = (int8_t)u8;
2201 return rcStrict;
2202}
2203
2204
2205/**
2206 * Fetches the next signed byte from the opcode stream, extending it to
2207 * unsigned 64-bit.
2208 *
2209 * @returns Strict VBox status code.
2210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2211 * @param pu64 Where to return the unsigned qword.
2212 */
2213DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2214{
2215 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2216 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2217 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2218
2219 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2220 pVCpu->iem.s.offOpcode = offOpcode + 1;
2221 return VINF_SUCCESS;
2222}
2223
2224#endif /* !IEM_WITH_SETJMP */
2225
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream and sign extends it to
2229 * a quad word, returning automatically on failure.
2230 *
2231 * @param a_pu64 Where to return the quad word.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else
2243# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244#endif
2245
2246
2247#ifndef IEM_WITH_SETJMP
2248
2249/**
2250 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2251 *
2252 * @returns Strict VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param pu16 Where to return the opcode word.
2255 */
2256DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2257{
2258 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2259 if (rcStrict == VINF_SUCCESS)
2260 {
2261 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2262# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2263 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2264# else
2265 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2266# endif
2267 pVCpu->iem.s.offOpcode = offOpcode + 2;
2268 }
2269 else
2270 *pu16 = 0;
2271 return rcStrict;
2272}
2273
2274
2275/**
2276 * Fetches the next opcode word.
2277 *
2278 * @returns Strict VBox status code.
2279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2280 * @param pu16 Where to return the opcode word.
2281 */
2282DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2283{
2284 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2285 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2286 {
2287 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2288# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2289 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2290# else
2291 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2292# endif
2293 return VINF_SUCCESS;
2294 }
2295 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2296}
2297
2298#else /* IEM_WITH_SETJMP */
2299
2300/**
2301 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2302 *
2303 * @returns The opcode word.
2304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2305 */
2306DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2307{
2308# ifdef IEM_WITH_CODE_TLB
2309 uint16_t u16;
2310 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2311 return u16;
2312# else
2313 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2314 if (rcStrict == VINF_SUCCESS)
2315 {
2316 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2317 pVCpu->iem.s.offOpcode += 2;
2318# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2319 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2320# else
2321 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2322# endif
2323 }
2324 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2325# endif
2326}
2327
2328
2329/**
2330 * Fetches the next opcode word, longjmp on error.
2331 *
2332 * @returns The opcode word.
2333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2334 */
2335DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2336{
2337# ifdef IEM_WITH_CODE_TLB
2338 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2339 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2340 if (RT_LIKELY( pbBuf != NULL
2341 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2342 {
2343 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2344# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2345 return *(uint16_t const *)&pbBuf[offBuf];
2346# else
2347 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2348# endif
2349 }
2350# else
2351 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2352 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2353 {
2354 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2355# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2356 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2357# else
2358 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2359# endif
2360 }
2361# endif
2362 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2363}
2364
2365#endif /* IEM_WITH_SETJMP */
2366
2367
2368/**
2369 * Fetches the next opcode word, returns automatically on failure.
2370 *
2371 * @param a_pu16 Where to return the opcode word.
2372 * @remark Implicitly references pVCpu.
2373 */
2374#ifndef IEM_WITH_SETJMP
2375# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2376 do \
2377 { \
2378 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2379 if (rcStrict2 != VINF_SUCCESS) \
2380 return rcStrict2; \
2381 } while (0)
2382#else
2383# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2384#endif
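/*
 * Usage sketch (illustrative only): fetching a 16-bit immediate operand, e.g.
 * the imm16 of an instruction decoded with a 16-bit operand size.
 *
 *     uint16_t u16Imm;
 *     IEM_OPCODE_GET_NEXT_U16(&u16Imm);  // returns / longjmps on fetch failure
 */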
2385
2386#ifndef IEM_WITH_SETJMP
2387
2388/**
2389 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2390 *
2391 * @returns Strict VBox status code.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 * @param pu32 Where to return the opcode double word.
2394 */
2395DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2396{
2397 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2398 if (rcStrict == VINF_SUCCESS)
2399 {
2400 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2401 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2402 pVCpu->iem.s.offOpcode = offOpcode + 2;
2403 }
2404 else
2405 *pu32 = 0;
2406 return rcStrict;
2407}
2408
2409
2410/**
2411 * Fetches the next opcode word, zero extending it to a double word.
2412 *
2413 * @returns Strict VBox status code.
2414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2415 * @param pu32 Where to return the opcode double word.
2416 */
2417DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2418{
2419 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2420 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2421 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2422
2423 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2424 pVCpu->iem.s.offOpcode = offOpcode + 2;
2425 return VINF_SUCCESS;
2426}
2427
2428#endif /* !IEM_WITH_SETJMP */
2429
2430
2431/**
2432 * Fetches the next opcode word and zero extends it to a double word, returns
2433 * automatically on failure.
2434 *
2435 * @param a_pu32 Where to return the opcode double word.
2436 * @remark Implicitly references pVCpu.
2437 */
2438#ifndef IEM_WITH_SETJMP
2439# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2440 do \
2441 { \
2442 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2443 if (rcStrict2 != VINF_SUCCESS) \
2444 return rcStrict2; \
2445 } while (0)
2446#else
2447# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2448#endif
2449
2450#ifndef IEM_WITH_SETJMP
2451
2452/**
2453 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2454 *
2455 * @returns Strict VBox status code.
2456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2457 * @param pu64 Where to return the opcode quad word.
2458 */
2459DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2460{
2461 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2462 if (rcStrict == VINF_SUCCESS)
2463 {
2464 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2465 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2466 pVCpu->iem.s.offOpcode = offOpcode + 2;
2467 }
2468 else
2469 *pu64 = 0;
2470 return rcStrict;
2471}
2472
2473
2474/**
2475 * Fetches the next opcode word, zero extending it to a quad word.
2476 *
2477 * @returns Strict VBox status code.
2478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2479 * @param pu64 Where to return the opcode quad word.
2480 */
2481DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2482{
2483 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2484 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2485 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2486
2487 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488 pVCpu->iem.s.offOpcode = offOpcode + 2;
2489 return VINF_SUCCESS;
2490}
2491
2492#endif /* !IEM_WITH_SETJMP */
2493
2494/**
2495 * Fetches the next opcode word and zero extends it to a quad word, returns
2496 * automatically on failure.
2497 *
2498 * @param a_pu64 Where to return the opcode quad word.
2499 * @remark Implicitly references pVCpu.
2500 */
2501#ifndef IEM_WITH_SETJMP
2502# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2503 do \
2504 { \
2505 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2506 if (rcStrict2 != VINF_SUCCESS) \
2507 return rcStrict2; \
2508 } while (0)
2509#else
2510# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2511#endif
2512
2513
2514#ifndef IEM_WITH_SETJMP
2515/**
2516 * Fetches the next signed word from the opcode stream.
2517 *
2518 * @returns Strict VBox status code.
2519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2520 * @param pi16 Where to return the signed word.
2521 */
2522DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2523{
2524 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2525}
2526#endif /* !IEM_WITH_SETJMP */
2527
2528
2529/**
2530 * Fetches the next signed word from the opcode stream, returning automatically
2531 * on failure.
2532 *
2533 * @param a_pi16 Where to return the signed word.
2534 * @remark Implicitly references pVCpu.
2535 */
2536#ifndef IEM_WITH_SETJMP
2537# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2538 do \
2539 { \
2540 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2541 if (rcStrict2 != VINF_SUCCESS) \
2542 return rcStrict2; \
2543 } while (0)
2544#else
2545# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2546#endif
2547
2548#ifndef IEM_WITH_SETJMP
2549
2550/**
2551 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2552 *
2553 * @returns Strict VBox status code.
2554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2555 * @param pu32 Where to return the opcode dword.
2556 */
2557DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2558{
2559 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2560 if (rcStrict == VINF_SUCCESS)
2561 {
2562 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2563# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2564 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2565# else
2566 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2567 pVCpu->iem.s.abOpcode[offOpcode + 1],
2568 pVCpu->iem.s.abOpcode[offOpcode + 2],
2569 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2570# endif
2571 pVCpu->iem.s.offOpcode = offOpcode + 4;
2572 }
2573 else
2574 *pu32 = 0;
2575 return rcStrict;
2576}
2577
2578
2579/**
2580 * Fetches the next opcode dword.
2581 *
2582 * @returns Strict VBox status code.
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param pu32 Where to return the opcode double word.
2585 */
2586DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2587{
2588 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2589 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2590 {
2591 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2592# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2593 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2594# else
2595 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2596 pVCpu->iem.s.abOpcode[offOpcode + 1],
2597 pVCpu->iem.s.abOpcode[offOpcode + 2],
2598 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2599# endif
2600 return VINF_SUCCESS;
2601 }
2602 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2603}
2604
2605#else /* IEM_WITH_SETJMP */
2606
2607/**
2608 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2609 *
2610 * @returns The opcode dword.
2611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2612 */
2613DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2614{
2615# ifdef IEM_WITH_CODE_TLB
2616 uint32_t u32;
2617 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2618 return u32;
2619# else
2620 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2621 if (rcStrict == VINF_SUCCESS)
2622 {
2623 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2624 pVCpu->iem.s.offOpcode = offOpcode + 4;
2625# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2626 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2627# else
2628 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2629 pVCpu->iem.s.abOpcode[offOpcode + 1],
2630 pVCpu->iem.s.abOpcode[offOpcode + 2],
2631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2632# endif
2633 }
2634 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2635# endif
2636}
2637
2638
2639/**
2640 * Fetches the next opcode dword, longjmp on error.
2641 *
2642 * @returns The opcode dword.
2643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2644 */
2645DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2646{
2647# ifdef IEM_WITH_CODE_TLB
2648 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2649 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2650 if (RT_LIKELY( pbBuf != NULL
2651 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2652 {
2653 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2654# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2655 return *(uint32_t const *)&pbBuf[offBuf];
2656# else
2657 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2658 pbBuf[offBuf + 1],
2659 pbBuf[offBuf + 2],
2660 pbBuf[offBuf + 3]);
2661# endif
2662 }
2663# else
2664 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2665 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2666 {
2667 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2668# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2669 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2670# else
2671 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2672 pVCpu->iem.s.abOpcode[offOpcode + 1],
2673 pVCpu->iem.s.abOpcode[offOpcode + 2],
2674 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2675# endif
2676 }
2677# endif
2678 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2679}
2680
2681#endif /* IEM_WITH_SETJMP */
2682
2683
2684/**
2685 * Fetches the next opcode dword, returns automatically on failure.
2686 *
2687 * @param a_pu32 Where to return the opcode dword.
2688 * @remark Implicitly references pVCpu.
2689 */
2690#ifndef IEM_WITH_SETJMP
2691# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2692 do \
2693 { \
2694 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2695 if (rcStrict2 != VINF_SUCCESS) \
2696 return rcStrict2; \
2697 } while (0)
2698#else
2699# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2700#endif
2701
2702#ifndef IEM_WITH_SETJMP
2703
2704/**
2705 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2706 *
2707 * @returns Strict VBox status code.
2708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2709 * @param pu64 Where to return the opcode quad word.
2710 */
2711DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2712{
2713 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2714 if (rcStrict == VINF_SUCCESS)
2715 {
2716 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2717 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2718 pVCpu->iem.s.abOpcode[offOpcode + 1],
2719 pVCpu->iem.s.abOpcode[offOpcode + 2],
2720 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2721 pVCpu->iem.s.offOpcode = offOpcode + 4;
2722 }
2723 else
2724 *pu64 = 0;
2725 return rcStrict;
2726}
2727
2728
2729/**
2730 * Fetches the next opcode dword, zero extending it to a quad word.
2731 *
2732 * @returns Strict VBox status code.
2733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2734 * @param pu64 Where to return the opcode quad word.
2735 */
2736DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2737{
2738 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2739 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2740 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2741
2742 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2743 pVCpu->iem.s.abOpcode[offOpcode + 1],
2744 pVCpu->iem.s.abOpcode[offOpcode + 2],
2745 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2746 pVCpu->iem.s.offOpcode = offOpcode + 4;
2747 return VINF_SUCCESS;
2748}
2749
2750#endif /* !IEM_WITH_SETJMP */
2751
2752
2753/**
2754 * Fetches the next opcode dword and zero extends it to a quad word, returns
2755 * automatically on failure.
2756 *
2757 * @param a_pu64 Where to return the opcode quad word.
2758 * @remark Implicitly references pVCpu.
2759 */
2760#ifndef IEM_WITH_SETJMP
2761# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2762 do \
2763 { \
2764 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2765 if (rcStrict2 != VINF_SUCCESS) \
2766 return rcStrict2; \
2767 } while (0)
2768#else
2769# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2770#endif
2771
2772
2773#ifndef IEM_WITH_SETJMP
2774/**
2775 * Fetches the next signed double word from the opcode stream.
2776 *
2777 * @returns Strict VBox status code.
2778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2779 * @param pi32 Where to return the signed double word.
2780 */
2781DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2782{
2783 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2784}
2785#endif
2786
2787/**
2788 * Fetches the next signed double word from the opcode stream, returning
2789 * automatically on failure.
2790 *
2791 * @param a_pi32 Where to return the signed double word.
2792 * @remark Implicitly references pVCpu.
2793 */
2794#ifndef IEM_WITH_SETJMP
2795# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2796 do \
2797 { \
2798 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2799 if (rcStrict2 != VINF_SUCCESS) \
2800 return rcStrict2; \
2801 } while (0)
2802#else
2803# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2804#endif
2805
2806#ifndef IEM_WITH_SETJMP
2807
2808/**
2809 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2810 *
2811 * @returns Strict VBox status code.
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 * @param pu64 Where to return the opcode qword.
2814 */
2815DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2816{
2817 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2818 if (rcStrict == VINF_SUCCESS)
2819 {
2820 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2821 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2822 pVCpu->iem.s.abOpcode[offOpcode + 1],
2823 pVCpu->iem.s.abOpcode[offOpcode + 2],
2824 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2825 pVCpu->iem.s.offOpcode = offOpcode + 4;
2826 }
2827 else
2828 *pu64 = 0;
2829 return rcStrict;
2830}
2831
2832
2833/**
2834 * Fetches the next opcode dword, sign extending it into a quad word.
2835 *
2836 * @returns Strict VBox status code.
2837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2838 * @param pu64 Where to return the opcode quad word.
2839 */
2840DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2841{
2842 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2843 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2844 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2845
2846 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2847 pVCpu->iem.s.abOpcode[offOpcode + 1],
2848 pVCpu->iem.s.abOpcode[offOpcode + 2],
2849 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2850 *pu64 = i32;
2851 pVCpu->iem.s.offOpcode = offOpcode + 4;
2852 return VINF_SUCCESS;
2853}
2854
2855#endif /* !IEM_WITH_SETJMP */
2856
2857
2858/**
2859 * Fetches the next opcode double word and sign extends it to a quad word,
2860 * returns automatically on failure.
2861 *
2862 * @param a_pu64 Where to return the opcode quad word.
2863 * @remark Implicitly references pVCpu.
2864 */
2865#ifndef IEM_WITH_SETJMP
2866# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2867 do \
2868 { \
2869 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2870 if (rcStrict2 != VINF_SUCCESS) \
2871 return rcStrict2; \
2872 } while (0)
2873#else
2874# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2875#endif
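/*
 * Example (illustrative): a 32-bit displacement of 0xfffffffc (-4) fetched via
 * IEM_OPCODE_GET_NEXT_S32_SX_U64 becomes 0xfffffffffffffffc in *a_pu64, which
 * is the form 64-bit effective address arithmetic expects.
 */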
2876
2877#ifndef IEM_WITH_SETJMP
2878
2879/**
2880 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2881 *
2882 * @returns Strict VBox status code.
2883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2884 * @param pu64 Where to return the opcode qword.
2885 */
2886DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2887{
2888 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2889 if (rcStrict == VINF_SUCCESS)
2890 {
2891 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2892# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2893 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2894# else
2895 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2896 pVCpu->iem.s.abOpcode[offOpcode + 1],
2897 pVCpu->iem.s.abOpcode[offOpcode + 2],
2898 pVCpu->iem.s.abOpcode[offOpcode + 3],
2899 pVCpu->iem.s.abOpcode[offOpcode + 4],
2900 pVCpu->iem.s.abOpcode[offOpcode + 5],
2901 pVCpu->iem.s.abOpcode[offOpcode + 6],
2902 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2903# endif
2904 pVCpu->iem.s.offOpcode = offOpcode + 8;
2905 }
2906 else
2907 *pu64 = 0;
2908 return rcStrict;
2909}
2910
2911
2912/**
2913 * Fetches the next opcode qword.
2914 *
2915 * @returns Strict VBox status code.
2916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2917 * @param pu64 Where to return the opcode qword.
2918 */
2919DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2920{
2921 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2922 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2923 {
2924# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2925 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2926# else
2927 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2928 pVCpu->iem.s.abOpcode[offOpcode + 1],
2929 pVCpu->iem.s.abOpcode[offOpcode + 2],
2930 pVCpu->iem.s.abOpcode[offOpcode + 3],
2931 pVCpu->iem.s.abOpcode[offOpcode + 4],
2932 pVCpu->iem.s.abOpcode[offOpcode + 5],
2933 pVCpu->iem.s.abOpcode[offOpcode + 6],
2934 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2935# endif
2936 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2937 return VINF_SUCCESS;
2938 }
2939 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2940}
2941
2942#else /* IEM_WITH_SETJMP */
2943
2944/**
2945 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2946 *
2947 * @returns The opcode qword.
2948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2949 */
2950DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2951{
2952# ifdef IEM_WITH_CODE_TLB
2953 uint64_t u64;
2954 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2955 return u64;
2956# else
2957 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2958 if (rcStrict == VINF_SUCCESS)
2959 {
2960 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2961 pVCpu->iem.s.offOpcode = offOpcode + 8;
2962# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2963 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2964# else
2965 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2966 pVCpu->iem.s.abOpcode[offOpcode + 1],
2967 pVCpu->iem.s.abOpcode[offOpcode + 2],
2968 pVCpu->iem.s.abOpcode[offOpcode + 3],
2969 pVCpu->iem.s.abOpcode[offOpcode + 4],
2970 pVCpu->iem.s.abOpcode[offOpcode + 5],
2971 pVCpu->iem.s.abOpcode[offOpcode + 6],
2972 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2973# endif
2974 }
2975 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2976# endif
2977}
2978
2979
2980/**
2981 * Fetches the next opcode qword, longjmp on error.
2982 *
2983 * @returns The opcode qword.
2984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2985 */
2986DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2987{
2988# ifdef IEM_WITH_CODE_TLB
2989 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2990 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2991 if (RT_LIKELY( pbBuf != NULL
2992 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2993 {
2994 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2995# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2996 return *(uint64_t const *)&pbBuf[offBuf];
2997# else
2998 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2999 pbBuf[offBuf + 1],
3000 pbBuf[offBuf + 2],
3001 pbBuf[offBuf + 3],
3002 pbBuf[offBuf + 4],
3003 pbBuf[offBuf + 5],
3004 pbBuf[offBuf + 6],
3005 pbBuf[offBuf + 7]);
3006# endif
3007 }
3008# else
3009 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3010 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3011 {
3012 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3013# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3014 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3015# else
3016 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3017 pVCpu->iem.s.abOpcode[offOpcode + 1],
3018 pVCpu->iem.s.abOpcode[offOpcode + 2],
3019 pVCpu->iem.s.abOpcode[offOpcode + 3],
3020 pVCpu->iem.s.abOpcode[offOpcode + 4],
3021 pVCpu->iem.s.abOpcode[offOpcode + 5],
3022 pVCpu->iem.s.abOpcode[offOpcode + 6],
3023 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3024# endif
3025 }
3026# endif
3027 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3028}
3029
3030#endif /* IEM_WITH_SETJMP */
3031
3032/**
3033 * Fetches the next opcode quad word, returns automatically on failure.
3034 *
3035 * @param a_pu64 Where to return the opcode quad word.
3036 * @remark Implicitly references pVCpu.
3037 */
3038#ifndef IEM_WITH_SETJMP
3039# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3040 do \
3041 { \
3042 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3043 if (rcStrict2 != VINF_SUCCESS) \
3044 return rcStrict2; \
3045 } while (0)
3046#else
3047# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3048#endif
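/*
 * Usage sketch (illustrative only): the quad word fetcher is what a decoder
 * uses for a 64-bit immediate, e.g. the imm64 of 'mov rax, imm64' (REX.W + B8).
 *
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);  // returns / longjmps on fetch failure
 */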
3049
3050
3051/** @name Misc Worker Functions.
3052 * @{
3053 */
3054
3055
3056/**
3057 * Validates a new SS segment.
3058 *
3059 * @returns VBox strict status code.
3060 * @param pVCpu The cross context virtual CPU structure of the
3061 * calling thread.
3062 * @param pCtx The CPU context.
3063 * @param NewSS The new SS selector.
3064 * @param uCpl The CPL to load the stack for.
3065 * @param pDesc Where to return the descriptor.
3066 */
3067IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3068{
3069 NOREF(pCtx);
3070
3071 /* Null selectors are not allowed (we're not called for dispatching
3072 interrupts with SS=0 in long mode). */
3073 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3074 {
3075 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3076 return iemRaiseTaskSwitchFault0(pVCpu);
3077 }
3078
3079 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3080 if ((NewSS & X86_SEL_RPL) != uCpl)
3081 {
3082 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3083 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3084 }
3085
3086 /*
3087 * Read the descriptor.
3088 */
3089 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3090 if (rcStrict != VINF_SUCCESS)
3091 return rcStrict;
3092
3093 /*
3094 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3095 */
3096 if (!pDesc->Legacy.Gen.u1DescType)
3097 {
3098 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3099 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3100 }
3101
3102 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3103 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3104 {
3105 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3106 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3107 }
3108 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3109 {
3110 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3111 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3112 }
3113
3114 /* Is it there? */
3115 /** @todo testcase: Is this checked before the canonical / limit check below? */
3116 if (!pDesc->Legacy.Gen.u1Present)
3117 {
3118 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3119 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3120 }
3121
3122 return VINF_SUCCESS;
3123}
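/*
 * Usage sketch (illustrative only, error paths elided): a caller loading a new
 * stack for a privilege change would validate the selector before committing:
 *
 *     IEMSELDESC DescSS;
 *     VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;    // #TS/#NP already raised with the right selector
 *     // ... go on to load SS:(E)SP from DescSS and the TSS values ...
 *
 * NewSS and uNewCpl stand in for whatever the caller obtained from the gate or
 * TSS it is processing.
 */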
3124
3125
3126/**
3127 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3128 * not.
3129 *
3130 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3131 * @param a_pCtx The CPU context.
3132 */
3133#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3134# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3135 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3136 ? (a_pCtx)->eflags.u \
3137 : CPUMRawGetEFlags(a_pVCpu) )
3138#else
3139# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3140 ( (a_pCtx)->eflags.u )
3141#endif
3142
3143/**
3144 * Updates the EFLAGS in the correct manner wrt. PATM.
3145 *
3146 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3147 * @param a_pCtx The CPU context.
3148 * @param a_fEfl The new EFLAGS.
3149 */
3150#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3151# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3152 do { \
3153 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3154 (a_pCtx)->eflags.u = (a_fEfl); \
3155 else \
3156 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3157 } while (0)
3158#else
3159# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3160 do { \
3161 (a_pCtx)->eflags.u = (a_fEfl); \
3162 } while (0)
3163#endif
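/*
 * Usage sketch (illustrative only): flag updates go through this macro pair so
 * that raw-mode (PATM) builds read and write the real flags via CPUM rather
 * than the copy in the guest context:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;                        // e.g. CLI-like behaviour
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */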
3164
3165
3166/** @} */
3167
3168/** @name Raising Exceptions.
3169 *
3170 * @{
3171 */
3172
3173/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3174 * @{ */
3175/** CPU exception. */
3176#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3177/** External interrupt (from PIC, APIC, whatever). */
3178#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3179/** Software interrupt (int or into, not bound).
3180 * Returns to the following instruction. */
3181#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3182/** Takes an error code. */
3183#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3184/** Takes a CR2. */
3185#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3186/** Generated by the breakpoint instruction. */
3187#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3188/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3189#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3190/** @} */
3191
3192
3193/**
3194 * Loads the specified stack far pointer from the TSS.
3195 *
3196 * @returns VBox strict status code.
3197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3198 * @param pCtx The CPU context.
3199 * @param uCpl The CPL to load the stack for.
3200 * @param pSelSS Where to return the new stack segment.
3201 * @param puEsp Where to return the new stack pointer.
3202 */
3203IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3204 PRTSEL pSelSS, uint32_t *puEsp)
3205{
3206 VBOXSTRICTRC rcStrict;
3207 Assert(uCpl < 4);
3208
3209 switch (pCtx->tr.Attr.n.u4Type)
3210 {
3211 /*
3212 * 16-bit TSS (X86TSS16).
3213 */
3214 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
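/* TR should only ever contain a busy TSS type; an available type asserts in strict builds and otherwise falls through and is treated like busy. */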
3215 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3216 {
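/* A 16-bit TSS stores the ring stacks as 2-byte SP / 2-byte SS pairs starting at offset 2,
   hence uCpl * 4 + 2; the 32-bit read below yields SP in the low word and SS in the high word. */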
3217 uint32_t off = uCpl * 4 + 2;
3218 if (off + 4 <= pCtx->tr.u32Limit)
3219 {
3220 /** @todo check actual access pattern here. */
3221 uint32_t u32Tmp = 0; /* gcc maybe... */
3222 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3223 if (rcStrict == VINF_SUCCESS)
3224 {
3225 *puEsp = RT_LOWORD(u32Tmp);
3226 *pSelSS = RT_HIWORD(u32Tmp);
3227 return VINF_SUCCESS;
3228 }
3229 }
3230 else
3231 {
3232 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3233 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3234 }
3235 break;
3236 }
3237
3238 /*
3239 * 32-bit TSS (X86TSS32).
3240 */
3241 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
3242 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3243 {
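/* A 32-bit TSS stores the ring stacks as 4-byte ESP / 4-byte SS (zero-extended) pairs starting
   at offset 4, hence uCpl * 8 + 4; the 64-bit read below yields ESP in the low dword and SS in
   the high dword. */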
3244 uint32_t off = uCpl * 8 + 4;
3245 if (off + 7 <= pCtx->tr.u32Limit)
3246 {
3247 /** @todo check actual access pattern here. */
3248 uint64_t u64Tmp;
3249 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3250 if (rcStrict == VINF_SUCCESS)
3251 {
3252 *puEsp = u64Tmp & UINT32_MAX;
3253 *pSelSS = (RTSEL)(u64Tmp >> 32);
3254 return VINF_SUCCESS;
3255 }
3256 }
3257 else
3258 {
3259 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3260 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3261 }
3262 break;
3263 }
3264
3265 default:
3266 AssertFailed();
3267 rcStrict = VERR_IEM_IPE_4;
3268 break;
3269 }
3270
3271 *puEsp = 0; /* make gcc happy */
3272 *pSelSS = 0; /* make gcc happy */
3273 return rcStrict;
3274}
3275
3276
3277/**
3278 * Loads the specified stack pointer from the 64-bit TSS.
3279 *
3280 * @returns VBox strict status code.
3281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3282 * @param pCtx The CPU context.
3283 * @param uCpl The CPL to load the stack for.
3284 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3285 * @param puRsp Where to return the new stack pointer.
3286 */
3287IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3288{
3289 Assert(uCpl < 4);
3290 Assert(uIst < 8);
3291 *puRsp = 0; /* make gcc happy */
3292
3293 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3294
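/* The 64-bit TSS holds three 8-byte ring stack pointers (rsp0..rsp2) followed by seven 8-byte
   IST entries (ist1..ist7); a non-zero uIst selects an IST slot, otherwise the RSP for the
   target CPL is used. */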
3295 uint32_t off;
3296 if (uIst)
3297 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3298 else
3299 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3300 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3301 {
3302 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3303 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3304 }
3305
3306 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3307}
3308
3309
3310/**
3311 * Adjust the CPU state according to the exception being raised.
3312 *
3313 * @param pCtx The CPU context.
3314 * @param u8Vector The exception that has been raised.
3315 */
3316DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3317{
3318 switch (u8Vector)
3319 {
3320 case X86_XCPT_DB:
3321 pCtx->dr[7] &= ~X86_DR7_GD;
3322 break;
3323 /** @todo Read the AMD and Intel exception reference... */
3324 }
3325}
3326
3327
3328/**
3329 * Implements exceptions and interrupts for real mode.
3330 *
3331 * @returns VBox strict status code.
3332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3333 * @param pCtx The CPU context.
3334 * @param cbInstr The number of bytes to offset rIP by in the return
3335 * address.
3336 * @param u8Vector The interrupt / exception vector number.
3337 * @param fFlags The flags.
3338 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3339 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3340 */
3341IEM_STATIC VBOXSTRICTRC
3342iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3343 PCPUMCTX pCtx,
3344 uint8_t cbInstr,
3345 uint8_t u8Vector,
3346 uint32_t fFlags,
3347 uint16_t uErr,
3348 uint64_t uCr2)
3349{
3350 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3351 NOREF(uErr); NOREF(uCr2);
3352
3353 /*
3354 * Read the IDT entry.
3355 */
3356 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3357 {
3358 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3359 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3360 }
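/* Each real-mode IVT entry is 4 bytes: the 16-bit offset followed by the 16-bit segment,
   fetched here with a single 32-bit read. */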
3361 RTFAR16 Idte;
3362 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3363 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3364 return rcStrict;
3365
3366 /*
3367 * Push the stack frame.
3368 */
3369 uint16_t *pu16Frame;
3370 uint64_t uNewRsp;
3371 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3372 if (rcStrict != VINF_SUCCESS)
3373 return rcStrict;
3374
3375 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3376#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3377 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3378 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3379 fEfl |= UINT16_C(0xf000);
3380#endif
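/* Real-mode frame layout: FLAGS, CS, IP pushed in that order, so IP ends up at the lowest
   address (index 0). */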
3381 pu16Frame[2] = (uint16_t)fEfl;
3382 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3383 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3384 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3385 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3386 return rcStrict;
3387
3388 /*
3389 * Load the vector address into cs:ip and make exception specific state
3390 * adjustments.
3391 */
3392 pCtx->cs.Sel = Idte.sel;
3393 pCtx->cs.ValidSel = Idte.sel;
3394 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3395 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3396 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3397 pCtx->rip = Idte.off;
3398 fEfl &= ~X86_EFL_IF;
3399 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3400
3401 /** @todo do we actually do this in real mode? */
3402 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3403 iemRaiseXcptAdjustState(pCtx, u8Vector);
3404
3405 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3406}
3407
3408
3409/**
3410 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3411 *
3412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3413 * @param pSReg Pointer to the segment register.
3414 */
3415IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3416{
3417 pSReg->Sel = 0;
3418 pSReg->ValidSel = 0;
3419 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3420 {
3421 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes. */
3422 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3423 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3424 }
3425 else
3426 {
3427 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3428 /** @todo check this on AMD-V */
3429 pSReg->u64Base = 0;
3430 pSReg->u32Limit = 0;
3431 }
3432}
3433
3434
3435/**
3436 * Loads a segment selector during a task switch in V8086 mode.
3437 *
3438 * @param pSReg Pointer to the segment register.
3439 * @param uSel The selector value to load.
3440 */
3441IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3442{
3443 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3444 pSReg->Sel = uSel;
3445 pSReg->ValidSel = uSel;
3446 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3447 pSReg->u64Base = uSel << 4;
3448 pSReg->u32Limit = 0xffff;
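/* 0xf3 = present, DPL=3, S=1 (code/data), type 3 (read/write data, accessed) - the fixed
   attributes of a V8086 mode segment. */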
3449 pSReg->Attr.u = 0xf3;
3450}
3451
3452
3453/**
3454 * Loads a NULL data selector into a selector register, both the hidden and
3455 * visible parts, in protected mode.
3456 *
3457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param pSReg Pointer to the segment register.
3459 * @param uRpl The RPL.
3460 */
3461IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3462{
3463 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3464 * data selector in protected mode. */
3465 pSReg->Sel = uRpl;
3466 pSReg->ValidSel = uRpl;
3467 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3468 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3469 {
3470 /* VT-x (Intel 3960x) observed doing something like this. */
3471 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3472 pSReg->u32Limit = UINT32_MAX;
3473 pSReg->u64Base = 0;
3474 }
3475 else
3476 {
3477 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3478 pSReg->u32Limit = 0;
3479 pSReg->u64Base = 0;
3480 }
3481}
3482
3483
3484/**
3485 * Loads a segment selector during a task switch in protected mode.
3486 *
3487 * In this task switch scenario, we would throw \#TS exceptions rather than
3488 * \#GPs.
3489 *
3490 * @returns VBox strict status code.
3491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3492 * @param pSReg Pointer to the segment register.
3493 * @param uSel The new selector value.
3494 *
3495 * @remarks This does _not_ handle CS or SS.
3496 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3497 */
3498IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3499{
3500 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3501
3502 /* Null data selector. */
3503 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3504 {
3505 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3506 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3507 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3508 return VINF_SUCCESS;
3509 }
3510
3511 /* Fetch the descriptor. */
3512 IEMSELDESC Desc;
3513 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3514 if (rcStrict != VINF_SUCCESS)
3515 {
3516 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3517 VBOXSTRICTRC_VAL(rcStrict)));
3518 return rcStrict;
3519 }
3520
3521 /* Must be a data segment or readable code segment. */
3522 if ( !Desc.Legacy.Gen.u1DescType
3523 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3524 {
3525 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3526 Desc.Legacy.Gen.u4Type));
3527 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3528 }
3529
3530 /* Check privileges for data segments and non-conforming code segments. */
3531 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3532 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3533 {
3534 /* The RPL and the new CPL must be less than or equal to the DPL. */
3535 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3536 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3537 {
3538 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3539 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3540 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3541 }
3542 }
3543
3544 /* Is it there? */
3545 if (!Desc.Legacy.Gen.u1Present)
3546 {
3547 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3548 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3549 }
3550
3551 /* The base and limit. */
3552 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3553 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3554
3555 /*
3556 * Ok, everything checked out fine. Now set the accessed bit before
3557 * committing the result into the registers.
3558 */
3559 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3560 {
3561 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3562 if (rcStrict != VINF_SUCCESS)
3563 return rcStrict;
3564 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3565 }
3566
3567 /* Commit */
3568 pSReg->Sel = uSel;
3569 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3570 pSReg->u32Limit = cbLimit;
3571 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3572 pSReg->ValidSel = uSel;
3573 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3574 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3575 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3576
3577 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3578 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3579 return VINF_SUCCESS;
3580}
3581
3582
3583/**
3584 * Performs a task switch.
3585 *
3586 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3587 * caller is responsible for performing the necessary checks (like DPL, TSS
3588 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3589 * reference for JMP, CALL, IRET.
3590 *
3591 * If the task switch is due to a software interrupt or hardware exception,
3592 * the caller is responsible for validating the TSS selector and descriptor. See
3593 * Intel Instruction reference for INT n.
3594 *
3595 * @returns VBox strict status code.
3596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3597 * @param pCtx The CPU context.
3598 * @param enmTaskSwitch What caused this task switch.
3599 * @param uNextEip The EIP effective after the task switch.
3600 * @param fFlags The flags.
3601 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3602 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3603 * @param SelTSS The TSS selector of the new task.
3604 * @param pNewDescTSS Pointer to the new TSS descriptor.
3605 */
3606IEM_STATIC VBOXSTRICTRC
3607iemTaskSwitch(PVMCPU pVCpu,
3608 PCPUMCTX pCtx,
3609 IEMTASKSWITCH enmTaskSwitch,
3610 uint32_t uNextEip,
3611 uint32_t fFlags,
3612 uint16_t uErr,
3613 uint64_t uCr2,
3614 RTSEL SelTSS,
3615 PIEMSELDESC pNewDescTSS)
3616{
3617 Assert(!IEM_IS_REAL_MODE(pVCpu));
3618 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3619
3620 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3621 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3622 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3623 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3624 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3625
3626 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3627 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3628
3629 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3630 fIsNewTSS386, pCtx->eip, uNextEip));
3631
3632 /* Update CR2 in case it's a page-fault. */
3633 /** @todo This should probably be done much earlier in IEM/PGM. See
3634 * @bugref{5653#c49}. */
3635 if (fFlags & IEM_XCPT_FLAGS_CR2)
3636 pCtx->cr2 = uCr2;
3637
3638 /*
3639 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3640 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3641 */
3642 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3643 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3644 if (uNewTSSLimit < uNewTSSLimitMin)
3645 {
3646 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3647 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3648 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3649 }
3650
3651 /*
3652 * Check the current TSS limit. The last written byte to the current TSS during the
3653 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3654 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3655 *
3656 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3657 * end up with smaller than "legal" TSS limits.
3658 */
3659 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
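/* The 0x5F / 0x29 minimums correspond to the last byte of the dynamic-field region that is
   mapped and written below ([eip..selLdt) resp. [ip..selLdt)). */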
3660 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3661 if (uCurTSSLimit < uCurTSSLimitMin)
3662 {
3663 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3664 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3665 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3666 }
3667
3668 /*
3669 * Verify that the new TSS can be accessed and map it. Map only the required contents
3670 * and not the entire TSS.
3671 */
3672 void *pvNewTSS;
3673 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3674 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3675 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3676 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3677 * not perform correct translation if this happens. See Intel spec. 7.2.1
3678 * "Task-State Segment" */
3679 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3680 if (rcStrict != VINF_SUCCESS)
3681 {
3682 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3683 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3684 return rcStrict;
3685 }
3686
3687 /*
3688 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3689 */
3690 uint32_t u32EFlags = pCtx->eflags.u32;
3691 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3692 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3693 {
3694 PX86DESC pDescCurTSS;
3695 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3696 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3697 if (rcStrict != VINF_SUCCESS)
3698 {
3699 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3700 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3701 return rcStrict;
3702 }
3703
3704 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3705 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3706 if (rcStrict != VINF_SUCCESS)
3707 {
3708 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3709 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3710 return rcStrict;
3711 }
3712
3713 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3714 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3715 {
3716 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3717 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3718 u32EFlags &= ~X86_EFL_NT;
3719 }
3720 }
3721
3722 /*
3723 * Save the CPU state into the current TSS.
3724 */
3725 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3726 if (GCPtrNewTSS == GCPtrCurTSS)
3727 {
3728 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3729 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3730 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3731 }
3732 if (fIsNewTSS386)
3733 {
3734 /*
3735 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3736 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3737 */
3738 void *pvCurTSS32;
3739 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3740 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3741 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3742 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3743 if (rcStrict != VINF_SUCCESS)
3744 {
3745 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3746 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3747 return rcStrict;
3748 }
3749
3750 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3751 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3752 pCurTSS32->eip = uNextEip;
3753 pCurTSS32->eflags = u32EFlags;
3754 pCurTSS32->eax = pCtx->eax;
3755 pCurTSS32->ecx = pCtx->ecx;
3756 pCurTSS32->edx = pCtx->edx;
3757 pCurTSS32->ebx = pCtx->ebx;
3758 pCurTSS32->esp = pCtx->esp;
3759 pCurTSS32->ebp = pCtx->ebp;
3760 pCurTSS32->esi = pCtx->esi;
3761 pCurTSS32->edi = pCtx->edi;
3762 pCurTSS32->es = pCtx->es.Sel;
3763 pCurTSS32->cs = pCtx->cs.Sel;
3764 pCurTSS32->ss = pCtx->ss.Sel;
3765 pCurTSS32->ds = pCtx->ds.Sel;
3766 pCurTSS32->fs = pCtx->fs.Sel;
3767 pCurTSS32->gs = pCtx->gs.Sel;
3768
3769 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3770 if (rcStrict != VINF_SUCCESS)
3771 {
3772 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3773 VBOXSTRICTRC_VAL(rcStrict)));
3774 return rcStrict;
3775 }
3776 }
3777 else
3778 {
3779 /*
3780 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3781 */
3782 void *pvCurTSS16;
3783 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3784 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3785 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3786 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3787 if (rcStrict != VINF_SUCCESS)
3788 {
3789 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3790 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3791 return rcStrict;
3792 }
3793
3794 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3795 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3796 pCurTSS16->ip = uNextEip;
3797 pCurTSS16->flags = u32EFlags;
3798 pCurTSS16->ax = pCtx->ax;
3799 pCurTSS16->cx = pCtx->cx;
3800 pCurTSS16->dx = pCtx->dx;
3801 pCurTSS16->bx = pCtx->bx;
3802 pCurTSS16->sp = pCtx->sp;
3803 pCurTSS16->bp = pCtx->bp;
3804 pCurTSS16->si = pCtx->si;
3805 pCurTSS16->di = pCtx->di;
3806 pCurTSS16->es = pCtx->es.Sel;
3807 pCurTSS16->cs = pCtx->cs.Sel;
3808 pCurTSS16->ss = pCtx->ss.Sel;
3809 pCurTSS16->ds = pCtx->ds.Sel;
3810
3811 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3812 if (rcStrict != VINF_SUCCESS)
3813 {
3814 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3815 VBOXSTRICTRC_VAL(rcStrict)));
3816 return rcStrict;
3817 }
3818 }
3819
3820 /*
3821 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3822 */
3823 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3824 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3825 {
3826 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3827 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3828 pNewTSS->selPrev = pCtx->tr.Sel;
3829 }
3830
3831 /*
3832 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3833 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3834 */
3835 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3836 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3837 bool fNewDebugTrap;
3838 if (fIsNewTSS386)
3839 {
3840 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3841 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3842 uNewEip = pNewTSS32->eip;
3843 uNewEflags = pNewTSS32->eflags;
3844 uNewEax = pNewTSS32->eax;
3845 uNewEcx = pNewTSS32->ecx;
3846 uNewEdx = pNewTSS32->edx;
3847 uNewEbx = pNewTSS32->ebx;
3848 uNewEsp = pNewTSS32->esp;
3849 uNewEbp = pNewTSS32->ebp;
3850 uNewEsi = pNewTSS32->esi;
3851 uNewEdi = pNewTSS32->edi;
3852 uNewES = pNewTSS32->es;
3853 uNewCS = pNewTSS32->cs;
3854 uNewSS = pNewTSS32->ss;
3855 uNewDS = pNewTSS32->ds;
3856 uNewFS = pNewTSS32->fs;
3857 uNewGS = pNewTSS32->gs;
3858 uNewLdt = pNewTSS32->selLdt;
3859 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3860 }
3861 else
3862 {
3863 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3864 uNewCr3 = 0;
3865 uNewEip = pNewTSS16->ip;
3866 uNewEflags = pNewTSS16->flags;
3867 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3868 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3869 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3870 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3871 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3872 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3873 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3874 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3875 uNewES = pNewTSS16->es;
3876 uNewCS = pNewTSS16->cs;
3877 uNewSS = pNewTSS16->ss;
3878 uNewDS = pNewTSS16->ds;
3879 uNewFS = 0;
3880 uNewGS = 0;
3881 uNewLdt = pNewTSS16->selLdt;
3882 fNewDebugTrap = false;
3883 }
3884
3885 if (GCPtrNewTSS == GCPtrCurTSS)
3886 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3887 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3888
3889 /*
3890 * We're done accessing the new TSS.
3891 */
3892 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3893 if (rcStrict != VINF_SUCCESS)
3894 {
3895 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3896 return rcStrict;
3897 }
3898
3899 /*
3900 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3901 */
3902 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3903 {
3904 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3905 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3906 if (rcStrict != VINF_SUCCESS)
3907 {
3908 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3909 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3910 return rcStrict;
3911 }
3912
3913 /* Check that the descriptor indicates the new TSS is available (not busy). */
3914 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3915 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3916 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3917
3918 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3919 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3920 if (rcStrict != VINF_SUCCESS)
3921 {
3922 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3923 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3924 return rcStrict;
3925 }
3926 }
3927
3928 /*
3929 * From this point on, we're technically in the new task. Any exceptions raised from here on
3930 * are taken after the task switch completes but before the new task executes its first instruction.
3931 */
3932 pCtx->tr.Sel = SelTSS;
3933 pCtx->tr.ValidSel = SelTSS;
3934 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3935 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3936 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3937 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3938 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3939
3940 /* Set the busy bit in TR. */
3941 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3942 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3943 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3944 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3945 {
3946 uNewEflags |= X86_EFL_NT;
3947 }
3948
3949 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3950 pCtx->cr0 |= X86_CR0_TS;
3951 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3952
3953 pCtx->eip = uNewEip;
3954 pCtx->eax = uNewEax;
3955 pCtx->ecx = uNewEcx;
3956 pCtx->edx = uNewEdx;
3957 pCtx->ebx = uNewEbx;
3958 pCtx->esp = uNewEsp;
3959 pCtx->ebp = uNewEbp;
3960 pCtx->esi = uNewEsi;
3961 pCtx->edi = uNewEdi;
3962
3963 uNewEflags &= X86_EFL_LIVE_MASK;
3964 uNewEflags |= X86_EFL_RA1_MASK;
3965 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3966
3967 /*
3968 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3969 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3970 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3971 */
3972 pCtx->es.Sel = uNewES;
3973 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3974
3975 pCtx->cs.Sel = uNewCS;
3976 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3977
3978 pCtx->ss.Sel = uNewSS;
3979 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3980
3981 pCtx->ds.Sel = uNewDS;
3982 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3983
3984 pCtx->fs.Sel = uNewFS;
3985 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3986
3987 pCtx->gs.Sel = uNewGS;
3988 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3989 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3990
3991 pCtx->ldtr.Sel = uNewLdt;
3992 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3993 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3994 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3995
3996 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3997 {
3998 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3999 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4000 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4001 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4002 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4003 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4004 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4005 }
4006
4007 /*
4008 * Switch CR3 for the new task.
4009 */
4010 if ( fIsNewTSS386
4011 && (pCtx->cr0 & X86_CR0_PG))
4012 {
4013 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4014 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4015 {
4016 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4017 AssertRCSuccessReturn(rc, rc);
4018 }
4019 else
4020 pCtx->cr3 = uNewCr3;
4021
4022 /* Inform PGM. */
4023 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4024 {
4025 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4026 AssertRCReturn(rc, rc);
4027 /* ignore informational status codes */
4028 }
4029 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4030 }
4031
4032 /*
4033 * Switch LDTR for the new task.
4034 */
4035 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4036 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4037 else
4038 {
4039 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4040
4041 IEMSELDESC DescNewLdt;
4042 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4043 if (rcStrict != VINF_SUCCESS)
4044 {
4045 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4046 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4047 return rcStrict;
4048 }
4049 if ( !DescNewLdt.Legacy.Gen.u1Present
4050 || DescNewLdt.Legacy.Gen.u1DescType
4051 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4052 {
4053 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4054 uNewLdt, DescNewLdt.Legacy.u));
4055 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4056 }
4057
4058 pCtx->ldtr.ValidSel = uNewLdt;
4059 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4060 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4061 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4062 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4063 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4064 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4066 }
4067
4068 IEMSELDESC DescSS;
4069 if (IEM_IS_V86_MODE(pVCpu))
4070 {
4071 pVCpu->iem.s.uCpl = 3;
4072 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4073 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4074 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4075 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4076 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4077 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4078
4079 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4080 DescSS.Legacy.u = 0;
4081 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4082 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4083 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4084 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4085 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4086 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4087 DescSS.Legacy.Gen.u2Dpl = 3;
4088 }
4089 else
4090 {
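/* In a task switch the CPL of the new task comes from the RPL field of the CS selector in the
   incoming TSS; it is checked against the CS descriptor DPL further down. */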
4091 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4092
4093 /*
4094 * Load the stack segment for the new task.
4095 */
4096 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4097 {
4098 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4099 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4100 }
4101
4102 /* Fetch the descriptor. */
4103 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4104 if (rcStrict != VINF_SUCCESS)
4105 {
4106 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4107 VBOXSTRICTRC_VAL(rcStrict)));
4108 return rcStrict;
4109 }
4110
4111 /* SS must be a data segment and writable. */
4112 if ( !DescSS.Legacy.Gen.u1DescType
4113 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4114 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4115 {
4116 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4117 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4118 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4119 }
4120
4121 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4122 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4123 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4124 {
4125 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4126 uNewCpl));
4127 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4128 }
4129
4130 /* Is it there? */
4131 if (!DescSS.Legacy.Gen.u1Present)
4132 {
4133 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4134 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4135 }
4136
4137 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4138 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4139
4140 /* Set the accessed bit before committing the result into SS. */
4141 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4142 {
4143 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4144 if (rcStrict != VINF_SUCCESS)
4145 return rcStrict;
4146 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4147 }
4148
4149 /* Commit SS. */
4150 pCtx->ss.Sel = uNewSS;
4151 pCtx->ss.ValidSel = uNewSS;
4152 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4153 pCtx->ss.u32Limit = cbLimit;
4154 pCtx->ss.u64Base = u64Base;
4155 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4156 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4157
4158 /* CPL has changed, update IEM before loading rest of segments. */
4159 pVCpu->iem.s.uCpl = uNewCpl;
4160
4161 /*
4162 * Load the data segments for the new task.
4163 */
4164 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4165 if (rcStrict != VINF_SUCCESS)
4166 return rcStrict;
4167 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4168 if (rcStrict != VINF_SUCCESS)
4169 return rcStrict;
4170 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4171 if (rcStrict != VINF_SUCCESS)
4172 return rcStrict;
4173 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4174 if (rcStrict != VINF_SUCCESS)
4175 return rcStrict;
4176
4177 /*
4178 * Load the code segment for the new task.
4179 */
4180 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4181 {
4182 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4183 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4184 }
4185
4186 /* Fetch the descriptor. */
4187 IEMSELDESC DescCS;
4188 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4189 if (rcStrict != VINF_SUCCESS)
4190 {
4191 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4192 return rcStrict;
4193 }
4194
4195 /* CS must be a code segment. */
4196 if ( !DescCS.Legacy.Gen.u1DescType
4197 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4198 {
4199 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4200 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4201 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4202 }
4203
4204 /* For conforming CS, DPL must be less than or equal to the RPL. */
4205 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4206 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4207 {
4208 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4209 DescCS.Legacy.Gen.u2Dpl));
4210 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4211 }
4212
4213 /* For non-conforming CS, DPL must match RPL. */
4214 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4215 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4216 {
4217 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4218 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4219 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4220 }
4221
4222 /* Is it there? */
4223 if (!DescCS.Legacy.Gen.u1Present)
4224 {
4225 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4226 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4227 }
4228
4229 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4230 u64Base = X86DESC_BASE(&DescCS.Legacy);
4231
4232 /* Set the accessed bit before committing the result into CS. */
4233 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4234 {
4235 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4236 if (rcStrict != VINF_SUCCESS)
4237 return rcStrict;
4238 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4239 }
4240
4241 /* Commit CS. */
4242 pCtx->cs.Sel = uNewCS;
4243 pCtx->cs.ValidSel = uNewCS;
4244 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4245 pCtx->cs.u32Limit = cbLimit;
4246 pCtx->cs.u64Base = u64Base;
4247 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4249 }
4250
4251 /** @todo Debug trap. */
4252 if (fIsNewTSS386 && fNewDebugTrap)
4253 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4254
4255 /*
4256 * Construct the error code masks based on what caused this task switch.
4257 * See Intel Instruction reference for INT.
4258 */
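/* uExt becomes the EXT bit (bit 0) of any error code raised below: set for hardware interrupts
   and exceptions, clear for software interrupts (INT n / INT3 / INTO). */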
4259 uint16_t uExt;
4260 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4261 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4262 {
4263 uExt = 1;
4264 }
4265 else
4266 uExt = 0;
4267
4268 /*
4269 * Push any error code on to the new stack.
4270 */
4271 if (fFlags & IEM_XCPT_FLAGS_ERR)
4272 {
4273 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4274 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
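/* Only the error code is pushed here: one 32-bit word for a 32-bit task, one 16-bit word for a
   16-bit task. */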
4275 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4276
4277 /* Check that there is sufficient space on the stack. */
4278 /** @todo Factor out segment limit checking for normal/expand down segments
4279 * into a separate function. */
4280 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4281 {
4282 if ( pCtx->esp - 1 > cbLimitSS
4283 || pCtx->esp < cbStackFrame)
4284 {
4285 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4286 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4287 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4288 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4289 }
4290 }
4291 else
4292 {
4293 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4294 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4295 {
4296 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4297 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4298 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4299 }
4300 }
4301
4302
4303 if (fIsNewTSS386)
4304 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4305 else
4306 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4307 if (rcStrict != VINF_SUCCESS)
4308 {
4309 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4310 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4311 return rcStrict;
4312 }
4313 }
4314
4315 /* Check the new EIP against the new CS limit. */
4316 if (pCtx->eip > pCtx->cs.u32Limit)
4317 {
4318 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4319 pCtx->eip, pCtx->cs.u32Limit));
4320 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4321 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4322 }
4323
4324 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4325 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4326}
4327
4328
4329/**
4330 * Implements exceptions and interrupts for protected mode.
4331 *
4332 * @returns VBox strict status code.
4333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4334 * @param pCtx The CPU context.
4335 * @param cbInstr The number of bytes to offset rIP by in the return
4336 * address.
4337 * @param u8Vector The interrupt / exception vector number.
4338 * @param fFlags The flags.
4339 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4340 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4341 */
4342IEM_STATIC VBOXSTRICTRC
4343iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4344 PCPUMCTX pCtx,
4345 uint8_t cbInstr,
4346 uint8_t u8Vector,
4347 uint32_t fFlags,
4348 uint16_t uErr,
4349 uint64_t uCr2)
4350{
4351 /*
4352 * Read the IDT entry.
4353 */
4354 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4355 {
4356 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4357 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4358 }
4359 X86DESC Idte;
4360 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4361 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4362 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4363 return rcStrict;
4364 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4365 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4366 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4367
4368 /*
4369 * Check the descriptor type, DPL and such.
4370 * ASSUMES this is done in the same order as described for call-gate calls.
4371 */
4372 if (Idte.Gate.u1DescType)
4373 {
4374 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4375 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4376 }
4377 bool fTaskGate = false;
4378 uint8_t f32BitGate = true;
4379 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4380 switch (Idte.Gate.u4Type)
4381 {
4382 case X86_SEL_TYPE_SYS_UNDEFINED:
4383 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4384 case X86_SEL_TYPE_SYS_LDT:
4385 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4386 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4387 case X86_SEL_TYPE_SYS_UNDEFINED2:
4388 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4389 case X86_SEL_TYPE_SYS_UNDEFINED3:
4390 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4391 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4392 case X86_SEL_TYPE_SYS_UNDEFINED4:
4393 {
4394 /** @todo check what actually happens when the type is wrong...
4395 * esp. call gates. */
4396 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4397 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4398 }
4399
4400 case X86_SEL_TYPE_SYS_286_INT_GATE:
4401 f32BitGate = false;
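/* fall thru */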
4402 case X86_SEL_TYPE_SYS_386_INT_GATE:
4403 fEflToClear |= X86_EFL_IF;
4404 break;
4405
4406 case X86_SEL_TYPE_SYS_TASK_GATE:
4407 fTaskGate = true;
4408#ifndef IEM_IMPLEMENTS_TASKSWITCH
4409 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4410#endif
4411 break;
4412
4413 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4414 f32BitGate = false;
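/* fall thru */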
4415 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4416 break;
4417
4418 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4419 }
4420
4421 /* Check DPL against CPL if applicable. */
4422 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4423 {
4424 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4425 {
4426 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4427 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4428 }
4429 }
4430
4431 /* Is it there? */
4432 if (!Idte.Gate.u1Present)
4433 {
4434 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4435 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4436 }
4437
4438 /* Is it a task-gate? */
4439 if (fTaskGate)
4440 {
4441 /*
4442 * Construct the error code masks based on what caused this task switch.
4443 * See Intel Instruction reference for INT.
4444 */
4445 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4446 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4447 RTSEL SelTSS = Idte.Gate.u16Sel;
4448
4449 /*
4450 * Fetch the TSS descriptor in the GDT.
4451 */
4452 IEMSELDESC DescTSS;
4453 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4454 if (rcStrict != VINF_SUCCESS)
4455 {
4456 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4457 VBOXSTRICTRC_VAL(rcStrict)));
4458 return rcStrict;
4459 }
4460
4461 /* The TSS descriptor must be a system segment and be available (not busy). */
4462 if ( DescTSS.Legacy.Gen.u1DescType
4463 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4464 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4465 {
4466 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4467 u8Vector, SelTSS, DescTSS.Legacy.au64));
4468 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4469 }
4470
4471 /* The TSS must be present. */
4472 if (!DescTSS.Legacy.Gen.u1Present)
4473 {
4474 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4475 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4476 }
4477
4478 /* Do the actual task switch. */
4479 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4480 }
4481
4482 /* A null CS is bad. */
4483 RTSEL NewCS = Idte.Gate.u16Sel;
4484 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4485 {
4486 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4487 return iemRaiseGeneralProtectionFault0(pVCpu);
4488 }
4489
4490 /* Fetch the descriptor for the new CS. */
4491 IEMSELDESC DescCS;
4492 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4493 if (rcStrict != VINF_SUCCESS)
4494 {
4495 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4496 return rcStrict;
4497 }
4498
4499 /* Must be a code segment. */
4500 if (!DescCS.Legacy.Gen.u1DescType)
4501 {
4502 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4503 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4504 }
4505 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4506 {
4507 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4508 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4509 }
4510
4511 /* Don't allow lowering the privilege level. */
4512 /** @todo Does the lowering of privileges apply to software interrupts
4513 * only? This has bearings on the more-privileged or
4514 * same-privilege stack behavior further down. A testcase would
4515 * be nice. */
4516 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4517 {
4518 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4519 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4520 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4521 }
4522
4523 /* Make sure the selector is present. */
4524 if (!DescCS.Legacy.Gen.u1Present)
4525 {
4526 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4527 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4528 }
4529
4530 /* Check the new EIP against the new CS limit. */
4531 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4532 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4533 ? Idte.Gate.u16OffsetLow
4534 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4535 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4536 if (uNewEip > cbLimitCS)
4537 {
4538 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4539 u8Vector, uNewEip, cbLimitCS, NewCS));
4540 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4541 }
4542 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4543
4544 /* Calc the flag image to push. */
4545 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4546 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4547 fEfl &= ~X86_EFL_RF;
4548 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4549 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4550
4551 /* From V8086 mode only go to CPL 0. */
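/* A conforming handler code segment executes at the interrupted code's CPL; a non-conforming
   one executes at its descriptor DPL. */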
4552 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4553 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4554 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4555 {
4556 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4557 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4558 }
4559
4560 /*
4561 * If the privilege level changes, we need to get a new stack from the TSS.
4562 * This in turns means validating the new SS and ESP...
4563 */
4564 if (uNewCpl != pVCpu->iem.s.uCpl)
4565 {
4566 RTSEL NewSS;
4567 uint32_t uNewEsp;
4568 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4569 if (rcStrict != VINF_SUCCESS)
4570 return rcStrict;
4571
4572 IEMSELDESC DescSS;
4573 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4574 if (rcStrict != VINF_SUCCESS)
4575 return rcStrict;
4576 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4577
4578 /* Check that there is sufficient space for the stack frame. */
4579 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
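/* Frame size in bytes for a 16-bit gate: IP, CS, FLAGS, SP, SS (+ error code) = 10 (12);
   from V8086 mode ES, DS, FS and GS are pushed as well = 18 (20). A 32-bit gate doubles
   these (<< f32BitGate). */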
4580 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4581 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4582 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4583
4584 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4585 {
4586 if ( uNewEsp - 1 > cbLimitSS
4587 || uNewEsp < cbStackFrame)
4588 {
4589 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4590 u8Vector, NewSS, uNewEsp, cbStackFrame));
4591 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4592 }
4593 }
4594 else
4595 {
4596 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4597 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4598 {
4599 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4600 u8Vector, NewSS, uNewEsp, cbStackFrame));
4601 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4602 }
4603 }
4604
4605 /*
4606 * Start making changes.
4607 */
4608
4609 /* Set the new CPL so that stack accesses use it. */
4610 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4611 pVCpu->iem.s.uCpl = uNewCpl;
4612
4613 /* Create the stack frame. */
4614 RTPTRUNION uStackFrame;
4615 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4616 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4617 if (rcStrict != VINF_SUCCESS)
4618 return rcStrict;
4619 void * const pvStackFrame = uStackFrame.pv;
4620 if (f32BitGate)
4621 {
4622 if (fFlags & IEM_XCPT_FLAGS_ERR)
4623 *uStackFrame.pu32++ = uErr;
4624 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4625 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4626 uStackFrame.pu32[2] = fEfl;
4627 uStackFrame.pu32[3] = pCtx->esp;
4628 uStackFrame.pu32[4] = pCtx->ss.Sel;
4629 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4630 if (fEfl & X86_EFL_VM)
4631 {
4632 uStackFrame.pu32[1] = pCtx->cs.Sel;
4633 uStackFrame.pu32[5] = pCtx->es.Sel;
4634 uStackFrame.pu32[6] = pCtx->ds.Sel;
4635 uStackFrame.pu32[7] = pCtx->fs.Sel;
4636 uStackFrame.pu32[8] = pCtx->gs.Sel;
4637 }
4638 }
4639 else
4640 {
4641 if (fFlags & IEM_XCPT_FLAGS_ERR)
4642 *uStackFrame.pu16++ = uErr;
4643 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4644 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4645 uStackFrame.pu16[2] = fEfl;
4646 uStackFrame.pu16[3] = pCtx->sp;
4647 uStackFrame.pu16[4] = pCtx->ss.Sel;
4648 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4649 if (fEfl & X86_EFL_VM)
4650 {
4651 uStackFrame.pu16[1] = pCtx->cs.Sel;
4652 uStackFrame.pu16[5] = pCtx->es.Sel;
4653 uStackFrame.pu16[6] = pCtx->ds.Sel;
4654 uStackFrame.pu16[7] = pCtx->fs.Sel;
4655 uStackFrame.pu16[8] = pCtx->gs.Sel;
4656 }
4657 }
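        /* Resulting frame layout, from the lowest address (the new [E]SP) upwards:
           [error code,] [E]IP, CS, [E]FLAGS, old [E]SP, old SS, and - when the
           interrupted code was V8086 - old ES, DS, FS and GS as well (with CS
           stored as-is rather than RPL-adjusted in that case). */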
4658 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4659 if (rcStrict != VINF_SUCCESS)
4660 return rcStrict;
4661
4662 /* Mark the selectors 'accessed' (hope this is the correct time). */
4663         /** @todo testcase: exactly _when_ are the accessed bits set - before or
4664 * after pushing the stack frame? (Write protect the gdt + stack to
4665 * find out.) */
4666 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4667 {
4668 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4669 if (rcStrict != VINF_SUCCESS)
4670 return rcStrict;
4671 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4672 }
4673
4674 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4675 {
4676 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4677 if (rcStrict != VINF_SUCCESS)
4678 return rcStrict;
4679 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4680 }
4681
4682 /*
4683          * Start committing the register changes (joins with the DPL=CPL branch).
4684 */
4685 pCtx->ss.Sel = NewSS;
4686 pCtx->ss.ValidSel = NewSS;
4687 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4688 pCtx->ss.u32Limit = cbLimitSS;
4689 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4690 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4691 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4692 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4693 * SP is loaded).
4694 * Need to check the other combinations too:
4695 * - 16-bit TSS, 32-bit handler
4696 * - 32-bit TSS, 16-bit handler */
4697 if (!pCtx->ss.Attr.n.u1DefBig)
4698 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4699 else
4700 pCtx->rsp = uNewEsp - cbStackFrame;
4701
4702 if (fEfl & X86_EFL_VM)
4703 {
4704 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4705 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4706 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4707 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4708 }
4709 }
4710 /*
4711 * Same privilege, no stack change and smaller stack frame.
4712 */
4713 else
4714 {
4715 uint64_t uNewRsp;
4716 RTPTRUNION uStackFrame;
4717 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4718 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4719 if (rcStrict != VINF_SUCCESS)
4720 return rcStrict;
4721 void * const pvStackFrame = uStackFrame.pv;
4722
4723 if (f32BitGate)
4724 {
4725 if (fFlags & IEM_XCPT_FLAGS_ERR)
4726 *uStackFrame.pu32++ = uErr;
4727 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4728 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4729 uStackFrame.pu32[2] = fEfl;
4730 }
4731 else
4732 {
4733 if (fFlags & IEM_XCPT_FLAGS_ERR)
4734 *uStackFrame.pu16++ = uErr;
4735 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4736 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4737 uStackFrame.pu16[2] = fEfl;
4738 }
4739 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4740 if (rcStrict != VINF_SUCCESS)
4741 return rcStrict;
4742
4743 /* Mark the CS selector as 'accessed'. */
4744 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4745 {
4746 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4747 if (rcStrict != VINF_SUCCESS)
4748 return rcStrict;
4749 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4750 }
4751
4752 /*
4753 * Start committing the register changes (joins with the other branch).
4754 */
4755 pCtx->rsp = uNewRsp;
4756 }
4757
4758 /* ... register committing continues. */
4759 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4760 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4761 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4762 pCtx->cs.u32Limit = cbLimitCS;
4763 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4764 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4765
4766 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4767 fEfl &= ~fEflToClear;
4768 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4769
4770 if (fFlags & IEM_XCPT_FLAGS_CR2)
4771 pCtx->cr2 = uCr2;
4772
4773 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4774 iemRaiseXcptAdjustState(pCtx, u8Vector);
4775
4776 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4777}
4778
4779
4780/**
4781 * Implements exceptions and interrupts for long mode.
4782 *
4783 * @returns VBox strict status code.
4784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4785 * @param pCtx The CPU context.
4786 * @param cbInstr The number of bytes to offset rIP by in the return
4787 * address.
4788 * @param u8Vector The interrupt / exception vector number.
4789 * @param fFlags The flags.
4790 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4791 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4792 */
4793IEM_STATIC VBOXSTRICTRC
4794iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4795 PCPUMCTX pCtx,
4796 uint8_t cbInstr,
4797 uint8_t u8Vector,
4798 uint32_t fFlags,
4799 uint16_t uErr,
4800 uint64_t uCr2)
4801{
4802 /*
4803 * Read the IDT entry.
4804 */
4805 uint16_t offIdt = (uint16_t)u8Vector << 4;
4806 if (pCtx->idtr.cbIdt < offIdt + 7)
4807 {
4808 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4809 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4810 }
4811 X86DESC64 Idte;
4812 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4813 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4814 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4815 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4816 return rcStrict;
4817 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4818 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4819 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4820
4821 /*
4822 * Check the descriptor type, DPL and such.
4823 * ASSUMES this is done in the same order as described for call-gate calls.
4824 */
4825 if (Idte.Gate.u1DescType)
4826 {
4827 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4828 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4829 }
4830 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4831 switch (Idte.Gate.u4Type)
4832 {
4833 case AMD64_SEL_TYPE_SYS_INT_GATE:
4834 fEflToClear |= X86_EFL_IF;
4835 break;
4836 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4837 break;
4838
4839 default:
4840 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4841 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4842 }
4843
4844 /* Check DPL against CPL if applicable. */
4845 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4846 {
4847 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4848 {
4849 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4850 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4851 }
4852 }
4853
4854 /* Is it there? */
4855 if (!Idte.Gate.u1Present)
4856 {
4857 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4858 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4859 }
4860
4861 /* A null CS is bad. */
4862 RTSEL NewCS = Idte.Gate.u16Sel;
4863 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4864 {
4865 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4866 return iemRaiseGeneralProtectionFault0(pVCpu);
4867 }
4868
4869 /* Fetch the descriptor for the new CS. */
4870 IEMSELDESC DescCS;
4871 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4872 if (rcStrict != VINF_SUCCESS)
4873 {
4874 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4875 return rcStrict;
4876 }
4877
4878 /* Must be a 64-bit code segment. */
4879 if (!DescCS.Long.Gen.u1DescType)
4880 {
4881 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4882 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4883 }
4884 if ( !DescCS.Long.Gen.u1Long
4885 || DescCS.Long.Gen.u1DefBig
4886 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4887 {
4888 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4889 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4890 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4891 }
4892
4893 /* Don't allow lowering the privilege level. For non-conforming CS
4894 selectors, the CS.DPL sets the privilege level the trap/interrupt
4895 handler runs at. For conforming CS selectors, the CPL remains
4896 unchanged, but the CS.DPL must be <= CPL. */
4897 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4898 * when CPU in Ring-0. Result \#GP? */
4899 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4900 {
4901 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4902 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4903 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4904 }
4905
4906
4907 /* Make sure the selector is present. */
4908 if (!DescCS.Legacy.Gen.u1Present)
4909 {
4910 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4911 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4912 }
4913
4914 /* Check that the new RIP is canonical. */
4915 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4916 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4917 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4918 if (!IEM_IS_CANONICAL(uNewRip))
4919 {
4920 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4921 return iemRaiseGeneralProtectionFault0(pVCpu);
4922 }
4923
4924 /*
4925 * If the privilege level changes or if the IST isn't zero, we need to get
4926 * a new stack from the TSS.
4927 */
4928 uint64_t uNewRsp;
4929 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4930 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4931 if ( uNewCpl != pVCpu->iem.s.uCpl
4932 || Idte.Gate.u3IST != 0)
4933 {
4934 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4935 if (rcStrict != VINF_SUCCESS)
4936 return rcStrict;
4937 }
4938 else
4939 uNewRsp = pCtx->rsp;
4940 uNewRsp &= ~(uint64_t)0xf;
4941
4942 /*
4943 * Calc the flag image to push.
4944 */
4945 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4946 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4947 fEfl &= ~X86_EFL_RF;
4948 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4949 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4950
4951 /*
4952 * Start making changes.
4953 */
4954 /* Set the new CPL so that stack accesses use it. */
4955 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4956 pVCpu->iem.s.uCpl = uNewCpl;
4957
4958 /* Create the stack frame. */
4959 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4960 RTPTRUNION uStackFrame;
4961 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4962 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4963 if (rcStrict != VINF_SUCCESS)
4964 return rcStrict;
4965 void * const pvStackFrame = uStackFrame.pv;
4966
4967 if (fFlags & IEM_XCPT_FLAGS_ERR)
4968 *uStackFrame.pu64++ = uErr;
4969 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4970 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4971 uStackFrame.pu64[2] = fEfl;
4972 uStackFrame.pu64[3] = pCtx->rsp;
4973 uStackFrame.pu64[4] = pCtx->ss.Sel;
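    /* Resulting 64-bit frame, from the lowest address (the new RSP) upwards:
       [error code,] RIP, CS, RFLAGS, old RSP, old SS - so after the commit the
       handler sees RSP pointing at the error code (or at the return RIP when
       no error code is pushed). */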
4974 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4975 if (rcStrict != VINF_SUCCESS)
4976 return rcStrict;
4977
4978     /* Mark the CS selector 'accessed' (hope this is the correct time). */
4979     /** @todo testcase: exactly _when_ are the accessed bits set - before or
4980 * after pushing the stack frame? (Write protect the gdt + stack to
4981 * find out.) */
4982 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4983 {
4984 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4985 if (rcStrict != VINF_SUCCESS)
4986 return rcStrict;
4987 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4988 }
4989
4990 /*
4991     * Start committing the register changes.
4992 */
4993 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4994 * hidden registers when interrupting 32-bit or 16-bit code! */
4995 if (uNewCpl != uOldCpl)
4996 {
4997 pCtx->ss.Sel = 0 | uNewCpl;
4998 pCtx->ss.ValidSel = 0 | uNewCpl;
4999 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5000 pCtx->ss.u32Limit = UINT32_MAX;
5001 pCtx->ss.u64Base = 0;
5002 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5003 }
5004 pCtx->rsp = uNewRsp - cbStackFrame;
5005 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5006 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5007 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5008 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5009 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5010 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5011 pCtx->rip = uNewRip;
5012
5013 fEfl &= ~fEflToClear;
5014 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5015
5016 if (fFlags & IEM_XCPT_FLAGS_CR2)
5017 pCtx->cr2 = uCr2;
5018
5019 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5020 iemRaiseXcptAdjustState(pCtx, u8Vector);
5021
5022 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5023}
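
/* Illustrative sketch only (not part of IEM proper): how a 64-bit IDT gate
 * target is assembled from the three offset fields handled above, and what the
 * canonical check boils down to on a CPU with 48 implemented virtual-address
 * bits.  The function and parameter names below are made up for the example;
 * only the fixed width integer types already used in this file are assumed. */
DECLINLINE(uint64_t) iemExampleGateOffset64(uint16_t uOffsetLow, uint16_t uOffsetHigh, uint32_t uOffsetTop)
{
    return (uint64_t)uOffsetLow
         | ((uint64_t)uOffsetHigh << 16)
         | ((uint64_t)uOffsetTop  << 32);
}

DECLINLINE(bool) iemExampleIsCanonical48(uint64_t uAddr)
{
    /* Canonical <=> bits 63:47 are all copies of bit 47; adding half the
       48-bit range turns that into a simple bounds check. */
    return (uAddr + UINT64_C(0x0000800000000000)) < UINT64_C(0x0001000000000000);
}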
5024
5025
5026/**
5027 * Implements exceptions and interrupts.
5028 *
5029 * All exceptions and interrupts go through this function!
5030 *
5031 * @returns VBox strict status code.
5032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5033 * @param cbInstr The number of bytes to offset rIP by in the return
5034 * address.
5035 * @param u8Vector The interrupt / exception vector number.
5036 * @param fFlags The flags.
5037 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5038 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5039 */
5040DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5041iemRaiseXcptOrInt(PVMCPU pVCpu,
5042 uint8_t cbInstr,
5043 uint8_t u8Vector,
5044 uint32_t fFlags,
5045 uint16_t uErr,
5046 uint64_t uCr2)
5047{
5048 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5049#ifdef IN_RING0
5050 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5051 AssertRCReturn(rc, rc);
5052#endif
5053
5054#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5055 /*
5056 * Flush prefetch buffer
5057 */
5058 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5059#endif
5060
5061 /*
5062 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5063 */
5064 if ( pCtx->eflags.Bits.u1VM
5065 && pCtx->eflags.Bits.u2IOPL != 3
5066 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5067 && (pCtx->cr0 & X86_CR0_PE) )
5068 {
5069 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5070 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5071 u8Vector = X86_XCPT_GP;
5072 uErr = 0;
5073 }
5074#ifdef DBGFTRACE_ENABLED
5075 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5076 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5077 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5078#endif
5079
5080 /*
5081 * Do recursion accounting.
5082 */
5083 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5084 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5085 if (pVCpu->iem.s.cXcptRecursions == 0)
5086 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5087 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5088 else
5089 {
5090 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5091 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5092
5093         /** @todo double and triple faults. */
5094 if (pVCpu->iem.s.cXcptRecursions >= 3)
5095 {
5096#ifdef DEBUG_bird
5097 AssertFailed();
5098#endif
5099 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5100 }
5101
5102 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5103 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5104 {
5105 ....
5106 } */
5107 }
5108 pVCpu->iem.s.cXcptRecursions++;
5109 pVCpu->iem.s.uCurXcpt = u8Vector;
5110 pVCpu->iem.s.fCurXcpt = fFlags;
5111
5112 /*
5113 * Extensive logging.
5114 */
5115#if defined(LOG_ENABLED) && defined(IN_RING3)
5116 if (LogIs3Enabled())
5117 {
5118 PVM pVM = pVCpu->CTX_SUFF(pVM);
5119 char szRegs[4096];
5120 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5121 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5122 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5123 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5124 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5125 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5126 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5127 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5128 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5129 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5130 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5131 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5132 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5133 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5134 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5135 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5136 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5137 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5138 " efer=%016VR{efer}\n"
5139 " pat=%016VR{pat}\n"
5140 " sf_mask=%016VR{sf_mask}\n"
5141 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5142 " lstar=%016VR{lstar}\n"
5143 " star=%016VR{star} cstar=%016VR{cstar}\n"
5144 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5145 );
5146
5147 char szInstr[256];
5148 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5149 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5150 szInstr, sizeof(szInstr), NULL);
5151 Log3(("%s%s\n", szRegs, szInstr));
5152 }
5153#endif /* LOG_ENABLED */
5154
5155 /*
5156 * Call the mode specific worker function.
5157 */
5158 VBOXSTRICTRC rcStrict;
5159 if (!(pCtx->cr0 & X86_CR0_PE))
5160 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5161 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5162 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5163 else
5164 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5165
5166 /* Flush the prefetch buffer. */
5167#ifdef IEM_WITH_CODE_TLB
5168 pVCpu->iem.s.pbInstrBuf = NULL;
5169#else
5170 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5171#endif
5172
5173 /*
5174 * Unwind.
5175 */
5176 pVCpu->iem.s.cXcptRecursions--;
5177 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5178 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5179 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5180 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5181 return rcStrict;
5182}
5183
5184#ifdef IEM_WITH_SETJMP
5185/**
5186 * See iemRaiseXcptOrInt. Will not return.
5187 */
5188IEM_STATIC DECL_NO_RETURN(void)
5189iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5190 uint8_t cbInstr,
5191 uint8_t u8Vector,
5192 uint32_t fFlags,
5193 uint16_t uErr,
5194 uint64_t uCr2)
5195{
5196 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5197 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5198}
5199#endif
5200
5201
5202/** \#DE - 00. */
5203DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5204{
5205 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5206}
5207
5208
5209/** \#DB - 01.
5210 * @note This automatically clears DR7.GD. */
5211DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5212{
5213 /** @todo set/clear RF. */
5214 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5215 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5216}
5217
5218
5219/** \#UD - 06. */
5220DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5221{
5222 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5223}
5224
5225
5226/** \#NM - 07. */
5227DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5228{
5229 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5230}
5231
5232
5233/** \#TS(err) - 0a. */
5234DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5235{
5236 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5237}
5238
5239
5240/** \#TS(tr) - 0a. */
5241DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5242{
5243 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5244 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5245}
5246
5247
5248/** \#TS(0) - 0a. */
5249DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5250{
5251 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5252 0, 0);
5253}
5254
5255
5256/** \#TS(err) - 0a. */
5257DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5258{
5259 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5260 uSel & X86_SEL_MASK_OFF_RPL, 0);
5261}
5262
5263
5264/** \#NP(err) - 0b. */
5265DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5266{
5267 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5268}
5269
5270
5271/** \#NP(seg) - 0b. */
5272DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5273{
5274 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5275 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5276}
5277
5278
5279/** \#NP(sel) - 0b. */
5280DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5281{
5282 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5283 uSel & ~X86_SEL_RPL, 0);
5284}
5285
5286
5287/** \#SS(seg) - 0c. */
5288DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5289{
5290 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5291 uSel & ~X86_SEL_RPL, 0);
5292}
5293
5294
5295/** \#SS(err) - 0c. */
5296DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5297{
5298 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5299}
5300
5301
5302/** \#GP(n) - 0d. */
5303DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5304{
5305 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5306}
5307
5308
5309/** \#GP(0) - 0d. */
5310DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5311{
5312 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5313}
5314
5315#ifdef IEM_WITH_SETJMP
5316/** \#GP(0) - 0d. */
5317DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5318{
5319 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5320}
5321#endif
5322
5323
5324/** \#GP(sel) - 0d. */
5325DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5326{
5327 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5328 Sel & ~X86_SEL_RPL, 0);
5329}
5330
5331
5332/** \#GP(0) - 0d. */
5333DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5334{
5335 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5336}
5337
5338
5339/** \#GP(sel) - 0d. */
5340DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5341{
5342 NOREF(iSegReg); NOREF(fAccess);
5343 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5344 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5345}
5346
5347#ifdef IEM_WITH_SETJMP
5348/** \#GP(sel) - 0d, longjmp. */
5349DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5350{
5351 NOREF(iSegReg); NOREF(fAccess);
5352 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5353 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5354}
5355#endif
5356
5357/** \#GP(sel) - 0d. */
5358DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5359{
5360 NOREF(Sel);
5361 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5362}
5363
5364#ifdef IEM_WITH_SETJMP
5365/** \#GP(sel) - 0d, longjmp. */
5366DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5367{
5368 NOREF(Sel);
5369 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5370}
5371#endif
5372
5373
5374/** \#GP(sel) - 0d. */
5375DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5376{
5377 NOREF(iSegReg); NOREF(fAccess);
5378 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5379}
5380
5381#ifdef IEM_WITH_SETJMP
5382/** \#GP(sel) - 0d, longjmp. */
5383DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5384 uint32_t fAccess)
5385{
5386 NOREF(iSegReg); NOREF(fAccess);
5387 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5388}
5389#endif
5390
5391
5392/** \#PF(n) - 0e. */
5393DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5394{
5395 uint16_t uErr;
5396 switch (rc)
5397 {
5398 case VERR_PAGE_NOT_PRESENT:
5399 case VERR_PAGE_TABLE_NOT_PRESENT:
5400 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5401 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5402 uErr = 0;
5403 break;
5404
5405 default:
5406 AssertMsgFailed(("%Rrc\n", rc));
5407 case VERR_ACCESS_DENIED:
5408 uErr = X86_TRAP_PF_P;
5409 break;
5410
5411 /** @todo reserved */
5412 }
5413
5414 if (pVCpu->iem.s.uCpl == 3)
5415 uErr |= X86_TRAP_PF_US;
5416
5417 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5418 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5419 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5420 uErr |= X86_TRAP_PF_ID;
5421
5422#if 0 /* This is so much non-sense, really. Why was it done like that? */
5423 /* Note! RW access callers reporting a WRITE protection fault, will clear
5424 the READ flag before calling. So, read-modify-write accesses (RW)
5425 can safely be reported as READ faults. */
5426 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5427 uErr |= X86_TRAP_PF_RW;
5428#else
5429 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5430 {
5431 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5432 uErr |= X86_TRAP_PF_RW;
5433 }
5434#endif
5435
5436 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5437 uErr, GCPtrWhere);
5438}
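
/* For reference, the architectural #PF error code bits composed above:
 *      bit 0 (P)    - 0 = non-present page, 1 = protection violation;
 *      bit 1 (W/R)  - set for write accesses;
 *      bit 2 (U/S)  - set when the access was made while CPL=3;
 *      bit 4 (I/D)  - set for instruction fetches when NX is in effect
 *                     (the PAE + EFER.NXE condition checked above);
 *      bit 3 (RSVD) - reserved-bit violations, not reported here yet (see the
 *                     '@todo reserved' note above). */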
5439
5440#ifdef IEM_WITH_SETJMP
5441/** \#PF(n) - 0e, longjmp. */
5442IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5443{
5444 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5445}
5446#endif
5447
5448
5449/** \#MF(0) - 10. */
5450DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5451{
5452 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5453}
5454
5455
5456/** \#AC(0) - 11. */
5457DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5458{
5459 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5460}
5461
5462
5463/**
5464 * Macro for calling iemCImplRaiseDivideError().
5465 *
5466 * This enables us to add/remove arguments and force different levels of
5467 * inlining as we wish.
5468 *
5469 * @return Strict VBox status code.
5470 */
5471#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5472IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5473{
5474 NOREF(cbInstr);
5475 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5476}
5477
5478
5479/**
5480 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5481 *
5482 * This enables us to add/remove arguments and force different levels of
5483 * inlining as we wish.
5484 *
5485 * @return Strict VBox status code.
5486 */
5487#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5488IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5489{
5490 NOREF(cbInstr);
5491 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5492}
5493
5494
5495/**
5496 * Macro for calling iemCImplRaiseInvalidOpcode().
5497 *
5498 * This enables us to add/remove arguments and force different levels of
5499 * inlining as we wish.
5500 *
5501 * @return Strict VBox status code.
5502 */
5503#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5504IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5505{
5506 NOREF(cbInstr);
5507 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5508}
5509
5510
5511/** @} */
5512
5513
5514/*
5515 *
5516 * Helper routines.
5517 * Helper routines.
5518 * Helper routines.
5519 *
5520 */
5521
5522/**
5523 * Recalculates the effective operand size.
5524 *
5525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5526 */
5527IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5528{
5529 switch (pVCpu->iem.s.enmCpuMode)
5530 {
5531 case IEMMODE_16BIT:
5532 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5533 break;
5534 case IEMMODE_32BIT:
5535 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5536 break;
5537 case IEMMODE_64BIT:
5538 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5539 {
5540 case 0:
5541 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5542 break;
5543 case IEM_OP_PRF_SIZE_OP:
5544 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5545 break;
5546 case IEM_OP_PRF_SIZE_REX_W:
5547 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5548 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5549 break;
5550 }
5551 break;
5552 default:
5553 AssertFailed();
5554 }
5555}
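
/* Quick reference for the 64-bit mode cases above (REX.W always wins over a
   0x66 operand-size prefix):
        no relevant prefix             -> the default operand size (32-bit for
                                          most instructions, see iemRecalEffOpSize64Default)
        0x66 only                      -> 16-bit
        REX.W, with or without 0x66    -> 64-bit */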
5556
5557
5558/**
5559 * Sets the default operand size to 64-bit and recalculates the effective
5560 * operand size.
5561 *
5562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5563 */
5564IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5565{
5566 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5567 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5568 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5569 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5570 else
5571 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5572}
5573
5574
5575/*
5576 *
5577 * Common opcode decoders.
5578 * Common opcode decoders.
5579 * Common opcode decoders.
5580 *
5581 */
5582//#include <iprt/mem.h>
5583
5584/**
5585 * Used to add extra details about a stub case.
5586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5587 */
5588IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5589{
5590#if defined(LOG_ENABLED) && defined(IN_RING3)
5591 PVM pVM = pVCpu->CTX_SUFF(pVM);
5592 char szRegs[4096];
5593 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5594 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5595 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5596 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5597 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5598 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5599 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5600 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5601 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5602 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5603 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5604 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5605 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5606 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5607 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5608 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5609 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5610 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5611 " efer=%016VR{efer}\n"
5612 " pat=%016VR{pat}\n"
5613 " sf_mask=%016VR{sf_mask}\n"
5614 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5615 " lstar=%016VR{lstar}\n"
5616 " star=%016VR{star} cstar=%016VR{cstar}\n"
5617 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5618 );
5619
5620 char szInstr[256];
5621 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5622 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5623 szInstr, sizeof(szInstr), NULL);
5624
5625 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5626#else
5627     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5628#endif
5629}
5630
5631/**
5632 * Complains about a stub.
5633 *
5634 * There are two versions of this macro: one for daily use and one for use when
5635 * working on IEM.
5636 */
5637#if 0
5638# define IEMOP_BITCH_ABOUT_STUB() \
5639 do { \
5640 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5641 iemOpStubMsg2(pVCpu); \
5642 RTAssertPanic(); \
5643 } while (0)
5644#else
5645# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5646#endif
5647
5648/** Stubs an opcode. */
5649#define FNIEMOP_STUB(a_Name) \
5650 FNIEMOP_DEF(a_Name) \
5651 { \
5652 RT_NOREF_PV(pVCpu); \
5653 IEMOP_BITCH_ABOUT_STUB(); \
5654 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5655 } \
5656 typedef int ignore_semicolon
5657
5658/** Stubs an opcode. */
5659#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5660 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5661 { \
5662 RT_NOREF_PV(pVCpu); \
5663 RT_NOREF_PV(a_Name0); \
5664 IEMOP_BITCH_ABOUT_STUB(); \
5665 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5666 } \
5667 typedef int ignore_semicolon
5668
5669/** Stubs an opcode which currently should raise \#UD. */
5670#define FNIEMOP_UD_STUB(a_Name) \
5671 FNIEMOP_DEF(a_Name) \
5672 { \
5673 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5674 return IEMOP_RAISE_INVALID_OPCODE(); \
5675 } \
5676 typedef int ignore_semicolon
5677
5678/** Stubs an opcode which currently should raise \#UD. */
5679#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5680 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5681 { \
5682 RT_NOREF_PV(pVCpu); \
5683 RT_NOREF_PV(a_Name0); \
5684 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5685 return IEMOP_RAISE_INVALID_OPCODE(); \
5686 } \
5687 typedef int ignore_semicolon
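
/* Typical usage sketch - the opcode handler names below are made up purely for
 * illustration:
 *      FNIEMOP_STUB(iemOp_ExampleNotImplemented);      - complains about the stub and returns VERR_IEM_INSTR_NOT_IMPLEMENTED.
 *      FNIEMOP_UD_STUB(iemOp_ExampleReservedEncoding); - decodes to #UD via IEMOP_RAISE_INVALID_OPCODE().
 */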
5688
5689
5690
5691/** @name Register Access.
5692 * @{
5693 */
5694
5695/**
5696 * Gets a reference (pointer) to the specified hidden segment register.
5697 *
5698 * @returns Hidden register reference.
5699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5700 * @param iSegReg The segment register.
5701 */
5702IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5703{
5704 Assert(iSegReg < X86_SREG_COUNT);
5705 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5706 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5707
5708#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5709 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5710 { /* likely */ }
5711 else
5712 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5713#else
5714 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5715#endif
5716 return pSReg;
5717}
5718
5719
5720/**
5721 * Ensures that the given hidden segment register is up to date.
5722 *
5723 * @returns Hidden register reference.
5724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5725 * @param pSReg The segment register.
5726 */
5727IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5728{
5729#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5730 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5731 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5732#else
5733 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5734 NOREF(pVCpu);
5735#endif
5736 return pSReg;
5737}
5738
5739
5740/**
5741 * Gets a reference (pointer) to the specified segment register (the selector
5742 * value).
5743 *
5744 * @returns Pointer to the selector variable.
5745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5746 * @param iSegReg The segment register.
5747 */
5748DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5749{
5750 Assert(iSegReg < X86_SREG_COUNT);
5751 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5752 return &pCtx->aSRegs[iSegReg].Sel;
5753}
5754
5755
5756/**
5757 * Fetches the selector value of a segment register.
5758 *
5759 * @returns The selector value.
5760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5761 * @param iSegReg The segment register.
5762 */
5763DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5764{
5765 Assert(iSegReg < X86_SREG_COUNT);
5766 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5767}
5768
5769
5770/**
5771 * Gets a reference (pointer) to the specified general purpose register.
5772 *
5773 * @returns Register reference.
5774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5775 * @param iReg The general purpose register.
5776 */
5777DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5778{
5779 Assert(iReg < 16);
5780 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5781 return &pCtx->aGRegs[iReg];
5782}
5783
5784
5785/**
5786 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5787 *
5788 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5789 *
5790 * @returns Register reference.
5791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5792 * @param iReg The register.
5793 */
5794DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5795{
5796 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5797 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5798 {
5799 Assert(iReg < 16);
5800 return &pCtx->aGRegs[iReg].u8;
5801 }
5802 /* high 8-bit register. */
5803 Assert(iReg < 8);
5804 return &pCtx->aGRegs[iReg & 3].bHi;
5805}
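
/* Encoding reminder for the high byte path above: without any REX prefix,
   register numbers 4-7 select AH, CH, DH and BH (the high byte of registers
   0-3), whereas the presence of any REX prefix makes them select SPL, BPL,
   SIL and DIL instead - which is why the IEM_OP_PRF_REX check routes those
   encodings through the first branch. */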
5806
5807
5808/**
5809 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5810 *
5811 * @returns Register reference.
5812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5813 * @param iReg The register.
5814 */
5815DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5816{
5817 Assert(iReg < 16);
5818 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5819 return &pCtx->aGRegs[iReg].u16;
5820}
5821
5822
5823/**
5824 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5825 *
5826 * @returns Register reference.
5827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5828 * @param iReg The register.
5829 */
5830DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5831{
5832 Assert(iReg < 16);
5833 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5834 return &pCtx->aGRegs[iReg].u32;
5835}
5836
5837
5838/**
5839 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5840 *
5841 * @returns Register reference.
5842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5843 * @param iReg The register.
5844 */
5845DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5846{
5847     Assert(iReg < 16);
5848 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5849 return &pCtx->aGRegs[iReg].u64;
5850}
5851
5852
5853/**
5854 * Fetches the value of an 8-bit general purpose register.
5855 *
5856 * @returns The register value.
5857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5858 * @param iReg The register.
5859 */
5860DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5861{
5862 return *iemGRegRefU8(pVCpu, iReg);
5863}
5864
5865
5866/**
5867 * Fetches the value of a 16-bit general purpose register.
5868 *
5869 * @returns The register value.
5870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5871 * @param iReg The register.
5872 */
5873DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5874{
5875 Assert(iReg < 16);
5876 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5877}
5878
5879
5880/**
5881 * Fetches the value of a 32-bit general purpose register.
5882 *
5883 * @returns The register value.
5884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5885 * @param iReg The register.
5886 */
5887DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5888{
5889 Assert(iReg < 16);
5890 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5891}
5892
5893
5894/**
5895 * Fetches the value of a 64-bit general purpose register.
5896 *
5897 * @returns The register value.
5898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5899 * @param iReg The register.
5900 */
5901DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5902{
5903 Assert(iReg < 16);
5904 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5905}
5906
5907
5908/**
5909 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5910 *
5911 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5912 * segment limit.
5913 *
5914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5915 * @param offNextInstr The offset of the next instruction.
5916 */
5917IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5918{
5919 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5920 switch (pVCpu->iem.s.enmEffOpSize)
5921 {
5922 case IEMMODE_16BIT:
5923 {
5924 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5925 if ( uNewIp > pCtx->cs.u32Limit
5926 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5927 return iemRaiseGeneralProtectionFault0(pVCpu);
5928 pCtx->rip = uNewIp;
5929 break;
5930 }
5931
5932 case IEMMODE_32BIT:
5933 {
5934 Assert(pCtx->rip <= UINT32_MAX);
5935 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5936
5937 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5938 if (uNewEip > pCtx->cs.u32Limit)
5939 return iemRaiseGeneralProtectionFault0(pVCpu);
5940 pCtx->rip = uNewEip;
5941 break;
5942 }
5943
5944 case IEMMODE_64BIT:
5945 {
5946 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5947
5948 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5949 if (!IEM_IS_CANONICAL(uNewRip))
5950 return iemRaiseGeneralProtectionFault0(pVCpu);
5951 pCtx->rip = uNewRip;
5952 break;
5953 }
5954
5955 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5956 }
5957
5958 pCtx->eflags.Bits.u1RF = 0;
5959
5960#ifndef IEM_WITH_CODE_TLB
5961 /* Flush the prefetch buffer. */
5962 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5963#endif
5964
5965 return VINF_SUCCESS;
5966}
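
/* Worked example for the 16-bit case above: at IP=0xFFFE, a two byte JMP rel8
   with offNextInstr=0x10 gives uNewIp = 0xFFFE + 0x10 + 2 = 0x0010 - the sum
   wraps to 16 bits before the CS limit check, reproducing the usual 16-bit IP
   wrap-around behaviour. */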
5967
5968
5969/**
5970 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5971 *
5972 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5973 * segment limit.
5974 *
5975 * @returns Strict VBox status code.
5976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5977 * @param offNextInstr The offset of the next instruction.
5978 */
5979IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5980{
5981 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5982 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5983
5984 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5985 if ( uNewIp > pCtx->cs.u32Limit
5986 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5987 return iemRaiseGeneralProtectionFault0(pVCpu);
5988 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5989 pCtx->rip = uNewIp;
5990 pCtx->eflags.Bits.u1RF = 0;
5991
5992#ifndef IEM_WITH_CODE_TLB
5993 /* Flush the prefetch buffer. */
5994 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5995#endif
5996
5997 return VINF_SUCCESS;
5998}
5999
6000
6001/**
6002 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6003 *
6004 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6005 * segment limit.
6006 *
6007 * @returns Strict VBox status code.
6008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6009 * @param offNextInstr The offset of the next instruction.
6010 */
6011IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6012{
6013 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6014 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6015
6016 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6017 {
6018 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6019
6020 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6021 if (uNewEip > pCtx->cs.u32Limit)
6022 return iemRaiseGeneralProtectionFault0(pVCpu);
6023 pCtx->rip = uNewEip;
6024 }
6025 else
6026 {
6027 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6028
6029 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6030 if (!IEM_IS_CANONICAL(uNewRip))
6031 return iemRaiseGeneralProtectionFault0(pVCpu);
6032 pCtx->rip = uNewRip;
6033 }
6034 pCtx->eflags.Bits.u1RF = 0;
6035
6036#ifndef IEM_WITH_CODE_TLB
6037 /* Flush the prefetch buffer. */
6038 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6039#endif
6040
6041 return VINF_SUCCESS;
6042}
6043
6044
6045/**
6046 * Performs a near jump to the specified address.
6047 *
6048 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6049 * segment limit.
6050 *
6051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6052 * @param uNewRip The new RIP value.
6053 */
6054IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6055{
6056 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6057 switch (pVCpu->iem.s.enmEffOpSize)
6058 {
6059 case IEMMODE_16BIT:
6060 {
6061 Assert(uNewRip <= UINT16_MAX);
6062 if ( uNewRip > pCtx->cs.u32Limit
6063 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6064 return iemRaiseGeneralProtectionFault0(pVCpu);
6065 /** @todo Test 16-bit jump in 64-bit mode. */
6066 pCtx->rip = uNewRip;
6067 break;
6068 }
6069
6070 case IEMMODE_32BIT:
6071 {
6072 Assert(uNewRip <= UINT32_MAX);
6073 Assert(pCtx->rip <= UINT32_MAX);
6074 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6075
6076 if (uNewRip > pCtx->cs.u32Limit)
6077 return iemRaiseGeneralProtectionFault0(pVCpu);
6078 pCtx->rip = uNewRip;
6079 break;
6080 }
6081
6082 case IEMMODE_64BIT:
6083 {
6084 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6085
6086 if (!IEM_IS_CANONICAL(uNewRip))
6087 return iemRaiseGeneralProtectionFault0(pVCpu);
6088 pCtx->rip = uNewRip;
6089 break;
6090 }
6091
6092 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6093 }
6094
6095 pCtx->eflags.Bits.u1RF = 0;
6096
6097#ifndef IEM_WITH_CODE_TLB
6098 /* Flush the prefetch buffer. */
6099 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6100#endif
6101
6102 return VINF_SUCCESS;
6103}
6104
6105
6106/**
6107 * Gets the address of the top of the stack.
6108 *
6109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6111 * @param pCtx The CPU context from which SP/ESP/RSP should be read.
6112 */
6113DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6114{
6115 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6116 return pCtx->rsp;
6117 if (pCtx->ss.Attr.n.u1DefBig)
6118 return pCtx->esp;
6119 return pCtx->sp;
6120}
6121
6122
6123/**
6124 * Updates the RIP/EIP/IP to point to the next instruction.
6125 *
6126 * This function leaves the EFLAGS.RF flag alone.
6127 *
6128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6129 * @param cbInstr The number of bytes to add.
6130 */
6131IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6132{
6133 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6134 switch (pVCpu->iem.s.enmCpuMode)
6135 {
6136 case IEMMODE_16BIT:
6137 Assert(pCtx->rip <= UINT16_MAX);
6138 pCtx->eip += cbInstr;
6139 pCtx->eip &= UINT32_C(0xffff);
6140 break;
6141
6142 case IEMMODE_32BIT:
6143 pCtx->eip += cbInstr;
6144 Assert(pCtx->rip <= UINT32_MAX);
6145 break;
6146
6147 case IEMMODE_64BIT:
6148 pCtx->rip += cbInstr;
6149 break;
6150 default: AssertFailed();
6151 }
6152}
6153
6154
6155#if 0
6156/**
6157 * Updates the RIP/EIP/IP to point to the next instruction.
6158 *
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 */
6161IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6162{
6163 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6164}
6165#endif
6166
6167
6168
6169/**
6170 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6171 *
6172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6173 * @param cbInstr The number of bytes to add.
6174 */
6175IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6176{
6177 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6178
6179 pCtx->eflags.Bits.u1RF = 0;
6180
6181 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6182#if ARCH_BITS >= 64
6183 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6184 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6185 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6186#else
6187 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6188 pCtx->rip += cbInstr;
6189 else
6190 {
6191 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6192 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6193 }
6194#endif
6195}
6196
6197
6198/**
6199 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6200 *
6201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6202 */
6203IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6204{
6205 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6206}
6207
6208
6209/**
6210 * Adds to the stack pointer.
6211 *
6212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6213 * @param pCtx The CPU context in which SP/ESP/RSP should be
6214 * updated.
6215 * @param cbToAdd The number of bytes to add (8-bit!).
6216 */
6217DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6218{
6219 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6220 pCtx->rsp += cbToAdd;
6221 else if (pCtx->ss.Attr.n.u1DefBig)
6222 pCtx->esp += cbToAdd;
6223 else
6224 pCtx->sp += cbToAdd;
6225}
6226
6227
6228/**
6229 * Subtracts from the stack pointer.
6230 *
6231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6232 * @param pCtx The CPU context in which SP/ESP/RSP should be
6233 * updated.
6234 * @param cbToSub The number of bytes to subtract (8-bit!).
6235 */
6236DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6237{
6238 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6239 pCtx->rsp -= cbToSub;
6240 else if (pCtx->ss.Attr.n.u1DefBig)
6241 pCtx->esp -= cbToSub;
6242 else
6243 pCtx->sp -= cbToSub;
6244}
6245
6246
6247/**
6248 * Adds to the temporary stack pointer.
6249 *
6250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6251 * @param pCtx Where to get the current stack mode.
6252 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6253 * @param cbToAdd The number of bytes to add (16-bit).
6254 */
6255DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6256{
6257 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6258 pTmpRsp->u += cbToAdd;
6259 else if (pCtx->ss.Attr.n.u1DefBig)
6260 pTmpRsp->DWords.dw0 += cbToAdd;
6261 else
6262 pTmpRsp->Words.w0 += cbToAdd;
6263}
6264
6265
6266/**
6267 * Subtracts from the temporary stack pointer.
6268 *
6269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6270 * @param pCtx Where to get the current stack mode.
6271 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6272 * @param cbToSub The number of bytes to subtract.
6273 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6274 * expecting that.
6275 */
6276DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6277{
6278 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6279 pTmpRsp->u -= cbToSub;
6280 else if (pCtx->ss.Attr.n.u1DefBig)
6281 pTmpRsp->DWords.dw0 -= cbToSub;
6282 else
6283 pTmpRsp->Words.w0 -= cbToSub;
6284}
6285
6286
6287/**
6288 * Calculates the effective stack address for a push of the specified size as
6289 * well as the new RSP value (upper bits may be masked).
6290 *
6291 * @returns Effective stack address for the push.
6292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6293 * @param pCtx Where to get the current stack mode.
6294 * @param cbItem The size of the stack item to push.
6295 * @param puNewRsp Where to return the new RSP value.
6296 */
6297DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6298{
6299 RTUINT64U uTmpRsp;
6300 RTGCPTR GCPtrTop;
6301 uTmpRsp.u = pCtx->rsp;
6302
6303 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6304 GCPtrTop = uTmpRsp.u -= cbItem;
6305 else if (pCtx->ss.Attr.n.u1DefBig)
6306 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6307 else
6308 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6309 *puNewRsp = uTmpRsp.u;
6310 return GCPtrTop;
6311}
6312
6313
6314/**
6315 * Gets the current stack pointer and calculates the value after a pop of the
6316 * specified size.
6317 *
6318 * @returns Current stack pointer.
6319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6320 * @param pCtx Where to get the current stack mode.
6321 * @param cbItem The size of the stack item to pop.
6322 * @param puNewRsp Where to return the new RSP value.
6323 */
6324DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6325{
6326 RTUINT64U uTmpRsp;
6327 RTGCPTR GCPtrTop;
6328 uTmpRsp.u = pCtx->rsp;
6329
6330 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6331 {
6332 GCPtrTop = uTmpRsp.u;
6333 uTmpRsp.u += cbItem;
6334 }
6335 else if (pCtx->ss.Attr.n.u1DefBig)
6336 {
6337 GCPtrTop = uTmpRsp.DWords.dw0;
6338 uTmpRsp.DWords.dw0 += cbItem;
6339 }
6340 else
6341 {
6342 GCPtrTop = uTmpRsp.Words.w0;
6343 uTmpRsp.Words.w0 += cbItem;
6344 }
6345 *puNewRsp = uTmpRsp.u;
6346 return GCPtrTop;
6347}
6348
6349
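/*
 * Illustrative sketch (not part of IEM): a push returns the *decremented*
 * stack address (the data is written at the new top), while a pop returns the
 * *current* top and only then advances.  Only the 16-bit stack flavour is
 * shown; the names are local assumptions.
 */
#if 0 /* sketch only, never compiled */
# include <stdint.h>

static uint16_t sketchSpForPush16(uint16_t *puSp, uint8_t cbItem)
{
    *puSp = (uint16_t)(*puSp - cbItem);     /* wraps within 64 KiB */
    return *puSp;                           /* the write goes here */
}

static uint16_t sketchSpForPop16(uint16_t *puSp, uint8_t cbItem)
{
    uint16_t uTop = *puSp;                  /* the read comes from the old top */
    *puSp = (uint16_t)(*puSp + cbItem);
    return uTop;
}
#endif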
6350/**
6351 * Calculates the effective stack address for a push of the specified size as
6352 * well as the new temporary RSP value (upper bits may be masked).
6353 *
6354 * @returns Effective stack address for the push.
6355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6356 * @param pCtx Where to get the current stack mode.
6357 * @param pTmpRsp The temporary stack pointer. This is updated.
6358 * @param cbItem The size of the stack item to push.
6359 */
6360DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6361{
6362 RTGCPTR GCPtrTop;
6363
6364 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6365 GCPtrTop = pTmpRsp->u -= cbItem;
6366 else if (pCtx->ss.Attr.n.u1DefBig)
6367 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6368 else
6369 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6370 return GCPtrTop;
6371}
6372
6373
6374/**
6375 * Gets the effective stack address for a pop of the specified size and
6376 * calculates and updates the temporary RSP.
6377 *
6378 * @returns Current stack pointer.
6379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6380 * @param pCtx Where to get the current stack mode.
6381 * @param pTmpRsp The temporary stack pointer. This is updated.
6382 * @param cbItem The size of the stack item to pop.
6383 */
6384DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6385{
6386 RTGCPTR GCPtrTop;
6387 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6388 {
6389 GCPtrTop = pTmpRsp->u;
6390 pTmpRsp->u += cbItem;
6391 }
6392 else if (pCtx->ss.Attr.n.u1DefBig)
6393 {
6394 GCPtrTop = pTmpRsp->DWords.dw0;
6395 pTmpRsp->DWords.dw0 += cbItem;
6396 }
6397 else
6398 {
6399 GCPtrTop = pTmpRsp->Words.w0;
6400 pTmpRsp->Words.w0 += cbItem;
6401 }
6402 return GCPtrTop;
6403}
6404
6405/** @} */
6406
6407
6408/** @name FPU access and helpers.
6409 *
6410 * @{
6411 */
6412
6413
6414/**
6415 * Hook for preparing to use the host FPU.
6416 *
6417 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6418 *
6419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6420 */
6421DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6422{
6423#ifdef IN_RING3
6424 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6425#else
6426 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6427#endif
6428}
6429
6430
6431/**
6432 * Hook for preparing to use the host FPU for SSE.
6433 *
6434 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6435 *
6436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6437 */
6438DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6439{
6440 iemFpuPrepareUsage(pVCpu);
6441}
6442
6443
6444/**
6445 * Hook for actualizing the guest FPU state before the interpreter reads it.
6446 *
6447 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6448 *
6449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6450 */
6451DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6452{
6453#ifdef IN_RING3
6454 NOREF(pVCpu);
6455#else
6456 CPUMRZFpuStateActualizeForRead(pVCpu);
6457#endif
6458}
6459
6460
6461/**
6462 * Hook for actualizing the guest FPU state before the interpreter changes it.
6463 *
6464 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6465 *
6466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6467 */
6468DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6469{
6470#ifdef IN_RING3
6471 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6472#else
6473 CPUMRZFpuStateActualizeForChange(pVCpu);
6474#endif
6475}
6476
6477
6478/**
6479 * Hook for actualizing the guest XMM0..15 register state for read only.
6480 *
6481 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6482 *
6483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6484 */
6485DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6486{
6487#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6488 NOREF(pVCpu);
6489#else
6490 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6491#endif
6492}
6493
6494
6495/**
6496 * Hook for actualizing the guest XMM0..15 register state for read+write.
6497 *
6498 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6499 *
6500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6501 */
6502DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6503{
6504#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6505 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6506#else
6507 CPUMRZFpuStateActualizeForChange(pVCpu);
6508#endif
6509}
6510
6511
6512/**
6513 * Stores a QNaN value into a FPU register.
6514 *
6515 * @param pReg Pointer to the register.
6516 */
6517DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6518{
6519 pReg->au32[0] = UINT32_C(0x00000000);
6520 pReg->au32[1] = UINT32_C(0xc0000000);
6521 pReg->au16[4] = UINT16_C(0xffff);
6522}
6523
6524
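/*
 * Illustrative sketch (not part of IEM): the three stores above assemble the
 * x87 "real indefinite" QNaN -- sign 1, exponent 0x7fff, mantissa with only
 * the two top bits set (integer/J bit plus quiet bit).  The struct below is a
 * local stand-in for RTFLOAT80U, not the IPRT layout.
 */
#if 0 /* sketch only, never compiled */
# include <stdint.h>

typedef struct SKETCHFLOAT80
{
    uint64_t uMantissa;     /* bits 0..63, bit 63 is the explicit integer (J) bit */
    uint16_t uExpAndSign;   /* bits 64..78 exponent, bit 79 sign */
} SKETCHFLOAT80;

static void sketchStoreIndefiniteQNan(SKETCHFLOAT80 *pReg)
{
    pReg->uMantissa   = UINT64_C(0xc000000000000000);   /* J bit + quiet bit */
    pReg->uExpAndSign = UINT16_C(0xffff);               /* sign=1, exponent=0x7fff */
}
#endif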
6525/**
6526 * Updates the FOP, FPU.CS and FPUIP registers.
6527 *
6528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6529 * @param pCtx The CPU context.
6530 * @param pFpuCtx The FPU context.
6531 */
6532DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6533{
6534 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6535 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6536 /** @todo x87.CS and FPUIP need to be kept separately. */
6537 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6538 {
6539 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6540 * happens in real mode here based on the fnsave and fnstenv images. */
6541 pFpuCtx->CS = 0;
6542 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6543 }
6544 else
6545 {
6546 pFpuCtx->CS = pCtx->cs.Sel;
6547 pFpuCtx->FPUIP = pCtx->rip;
6548 }
6549}
6550
6551
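/*
 * Illustrative sketch (not part of IEM): the real/V86-mode branch above folds
 * CS into FPUIP much like a real-mode linear address (EIP | CS*16, matching
 * what fnsave/fnstenv images appear to record), whereas protected mode keeps
 * the selector and offset apart.  Standalone code, local names only.
 */
#if 0 /* sketch only, never compiled */
# include <stdbool.h>
# include <stdint.h>

static void sketchUpdateFpuIp(bool fRealOrV86, uint16_t uCs, uint64_t uRip,
                              uint16_t *puFpuCs, uint64_t *puFpuIp)
{
    if (fRealOrV86)
    {
        *puFpuCs = 0;
        *puFpuIp = (uint32_t)uRip | ((uint32_t)uCs << 4);   /* linear-style address */
    }
    else
    {
        *puFpuCs = uCs;
        *puFpuIp = uRip;
    }
}
#endif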
6552/**
6553 * Updates the x87.DS and FPUDP registers.
6554 *
6555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6556 * @param pCtx The CPU context.
6557 * @param pFpuCtx The FPU context.
6558 * @param iEffSeg The effective segment register.
6559 * @param GCPtrEff The effective address relative to @a iEffSeg.
6560 */
6561DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6562{
6563 RTSEL sel;
6564 switch (iEffSeg)
6565 {
6566 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6567 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6568 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6569 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6570 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6571 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6572 default:
6573 AssertMsgFailed(("%d\n", iEffSeg));
6574 sel = pCtx->ds.Sel;
6575 }
6576 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6577 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6578 {
6579 pFpuCtx->DS = 0;
6580 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6581 }
6582 else
6583 {
6584 pFpuCtx->DS = sel;
6585 pFpuCtx->FPUDP = GCPtrEff;
6586 }
6587}
6588
6589
6590/**
6591 * Rotates the stack registers in the push direction.
6592 *
6593 * @param pFpuCtx The FPU context.
6594 * @remarks This is a complete waste of time, but fxsave stores the registers in
6595 * stack order.
6596 */
6597DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6598{
6599 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6600 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6601 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6602 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6603 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6604 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6605 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6606 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6607 pFpuCtx->aRegs[0].r80 = r80Tmp;
6608}
6609
6610
6611/**
6612 * Rotates the stack registers in the pop direction.
6613 *
6614 * @param pFpuCtx The FPU context.
6615 * @remarks This is a complete waste of time, but fxsave stores the registers in
6616 * stack order.
6617 */
6618DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6619{
6620 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6621 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6622 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6623 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6624 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6625 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6626 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6627 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6628 pFpuCtx->aRegs[7].r80 = r80Tmp;
6629}
6630
6631
6632/**
6633 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6634 * exception prevents it.
6635 *
6636 * @param pResult The FPU operation result to push.
6637 * @param pFpuCtx The FPU context.
6638 */
6639IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6640{
6641 /* Update FSW and bail if there are pending exceptions afterwards. */
6642 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6643 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6644 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6645 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6646 {
6647 pFpuCtx->FSW = fFsw;
6648 return;
6649 }
6650
6651 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6652 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6653 {
6654 /* All is fine, push the actual value. */
6655 pFpuCtx->FTW |= RT_BIT(iNewTop);
6656 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6657 }
6658 else if (pFpuCtx->FCW & X86_FCW_IM)
6659 {
6660 /* Masked stack overflow, push QNaN. */
6661 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6662 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6663 }
6664 else
6665 {
6666 /* Raise stack overflow, don't push anything. */
6667 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6668 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6669 return;
6670 }
6671
6672 fFsw &= ~X86_FSW_TOP_MASK;
6673 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6674 pFpuCtx->FSW = fFsw;
6675
6676 iemFpuRotateStackPush(pFpuCtx);
6677}
6678
6679
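/*
 * Illustrative sketch (not part of IEM): the TOP arithmetic used in the push
 * path above.  TOP is a 3-bit field at FSW bits 11..13; a push decrements it
 * modulo 8 (hence the "+ 7"), a pop increments it modulo 8.  The shift/mask
 * constants restate the architectural x87 layout, nothing VBox specific.
 */
#if 0 /* sketch only, never compiled */
# include <stdint.h>

# define SKETCH_FSW_TOP_SHIFT   11
# define SKETCH_FSW_TOP_SMASK   UINT16_C(0x7)

static uint16_t sketchTopAfterPush(uint16_t uFsw)
{
    return (uint16_t)((((uFsw >> SKETCH_FSW_TOP_SHIFT) & SKETCH_FSW_TOP_SMASK) + 7) & SKETCH_FSW_TOP_SMASK);
}

static uint16_t sketchTopAfterPop(uint16_t uFsw)
{
    return (uint16_t)((((uFsw >> SKETCH_FSW_TOP_SHIFT) & SKETCH_FSW_TOP_SMASK) + 1) & SKETCH_FSW_TOP_SMASK);
}
#endif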
6680/**
6681 * Stores a result in a FPU register and updates the FSW and FTW.
6682 *
6683 * @param pFpuCtx The FPU context.
6684 * @param pResult The result to store.
6685 * @param iStReg Which FPU register to store it in.
6686 */
6687IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6688{
6689 Assert(iStReg < 8);
6690 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6691 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6692 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6693 pFpuCtx->FTW |= RT_BIT(iReg);
6694 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6695}
6696
6697
6698/**
6699 * Only updates the FPU status word (FSW) with the result of the current
6700 * instruction.
6701 *
6702 * @param pFpuCtx The FPU context.
6703 * @param u16FSW The FSW output of the current instruction.
6704 */
6705IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6706{
6707 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6708 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6709}
6710
6711
6712/**
6713 * Pops one item off the FPU stack if no pending exception prevents it.
6714 *
6715 * @param pFpuCtx The FPU context.
6716 */
6717IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6718{
6719 /* Check pending exceptions. */
6720 uint16_t uFSW = pFpuCtx->FSW;
6721 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6722 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6723 return;
6724
6725    /* TOP = TOP + 1, i.e. pop one item (modulo 8). */
6726 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6727 uFSW &= ~X86_FSW_TOP_MASK;
6728 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6729 pFpuCtx->FSW = uFSW;
6730
6731 /* Mark the previous ST0 as empty. */
6732 iOldTop >>= X86_FSW_TOP_SHIFT;
6733 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6734
6735 /* Rotate the registers. */
6736 iemFpuRotateStackPop(pFpuCtx);
6737}
6738
6739
6740/**
6741 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6742 *
6743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6744 * @param pResult The FPU operation result to push.
6745 */
6746IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6747{
6748 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6749 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6750 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6751 iemFpuMaybePushResult(pResult, pFpuCtx);
6752}
6753
6754
6755/**
6756 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6757 * and sets FPUDP and FPUDS.
6758 *
6759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6760 * @param pResult The FPU operation result to push.
6761 * @param iEffSeg The effective segment register.
6762 * @param GCPtrEff The effective address relative to @a iEffSeg.
6763 */
6764IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6765{
6766 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6767 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6768 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6769 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6770 iemFpuMaybePushResult(pResult, pFpuCtx);
6771}
6772
6773
6774/**
6775 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6776 * unless a pending exception prevents it.
6777 *
6778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6779 * @param pResult The FPU operation result to store and push.
6780 */
6781IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6782{
6783 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6784 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6785 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6786
6787 /* Update FSW and bail if there are pending exceptions afterwards. */
6788 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6789 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6790 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6791 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6792 {
6793 pFpuCtx->FSW = fFsw;
6794 return;
6795 }
6796
6797 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6798 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6799 {
6800 /* All is fine, push the actual value. */
6801 pFpuCtx->FTW |= RT_BIT(iNewTop);
6802 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6803 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6804 }
6805 else if (pFpuCtx->FCW & X86_FCW_IM)
6806 {
6807 /* Masked stack overflow, push QNaN. */
6808 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6809 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6810 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6811 }
6812 else
6813 {
6814 /* Raise stack overflow, don't push anything. */
6815 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6816 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6817 return;
6818 }
6819
6820 fFsw &= ~X86_FSW_TOP_MASK;
6821 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6822 pFpuCtx->FSW = fFsw;
6823
6824 iemFpuRotateStackPush(pFpuCtx);
6825}
6826
6827
6828/**
6829 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6830 * FOP.
6831 *
6832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6833 * @param pResult The result to store.
6834 * @param iStReg Which FPU register to store it in.
6835 */
6836IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6837{
6838 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6839 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6840 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6841 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6842}
6843
6844
6845/**
6846 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6847 * FOP, and then pops the stack.
6848 *
6849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6850 * @param pResult The result to store.
6851 * @param iStReg Which FPU register to store it in.
6852 */
6853IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6854{
6855 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6856 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6857 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6858 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6859 iemFpuMaybePopOne(pFpuCtx);
6860}
6861
6862
6863/**
6864 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6865 * FPUDP, and FPUDS.
6866 *
6867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6868 * @param pResult The result to store.
6869 * @param iStReg Which FPU register to store it in.
6870 * @param iEffSeg The effective memory operand selector register.
6871 * @param GCPtrEff The effective memory operand offset.
6872 */
6873IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6874 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6875{
6876 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6877 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6878 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6879 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6880 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6881}
6882
6883
6884/**
6885 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6886 * FPUDP, and FPUDS, and then pops the stack.
6887 *
6888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6889 * @param pResult The result to store.
6890 * @param iStReg Which FPU register to store it in.
6891 * @param iEffSeg The effective memory operand selector register.
6892 * @param GCPtrEff The effective memory operand offset.
6893 */
6894IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6895 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6896{
6897 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6898 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6899 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6900 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6901 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6902 iemFpuMaybePopOne(pFpuCtx);
6903}
6904
6905
6906/**
6907 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6908 *
6909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6910 */
6911IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6912{
6913 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6914 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6915 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6916}
6917
6918
6919/**
6920 * Marks the specified stack register as free (for FFREE).
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 * @param iStReg The register to free.
6924 */
6925IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6926{
6927 Assert(iStReg < 8);
6928 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6929 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6930 pFpuCtx->FTW &= ~RT_BIT(iReg);
6931}
6932
6933
6934/**
6935 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6936 *
6937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6938 */
6939IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6940{
6941 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6942 uint16_t uFsw = pFpuCtx->FSW;
6943 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6944 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6945 uFsw &= ~X86_FSW_TOP_MASK;
6946 uFsw |= uTop;
6947 pFpuCtx->FSW = uFsw;
6948}
6949
6950
6951/**
6952 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6953 *
6954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6955 */
6956IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6957{
6958 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6959 uint16_t uFsw = pFpuCtx->FSW;
6960 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6961 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6962 uFsw &= ~X86_FSW_TOP_MASK;
6963 uFsw |= uTop;
6964 pFpuCtx->FSW = uFsw;
6965}
6966
6967
6968/**
6969 * Updates the FSW, FOP, FPUIP, and FPUCS.
6970 *
6971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6972 * @param u16FSW The FSW from the current instruction.
6973 */
6974IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6975{
6976 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6977 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6978 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6979 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6980}
6981
6982
6983/**
6984 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6985 *
6986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6987 * @param u16FSW The FSW from the current instruction.
6988 */
6989IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6990{
6991 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6992 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6993 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6994 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6995 iemFpuMaybePopOne(pFpuCtx);
6996}
6997
6998
6999/**
7000 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7001 *
7002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7003 * @param u16FSW The FSW from the current instruction.
7004 * @param iEffSeg The effective memory operand selector register.
7005 * @param GCPtrEff The effective memory operand offset.
7006 */
7007IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7008{
7009 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7010 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7011 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7012 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7013 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7014}
7015
7016
7017/**
7018 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7019 *
7020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7021 * @param u16FSW The FSW from the current instruction.
7022 */
7023IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7024{
7025 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7026 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7027 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7028 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7029 iemFpuMaybePopOne(pFpuCtx);
7030 iemFpuMaybePopOne(pFpuCtx);
7031}
7032
7033
7034/**
7035 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7036 *
7037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7038 * @param u16FSW The FSW from the current instruction.
7039 * @param iEffSeg The effective memory operand selector register.
7040 * @param GCPtrEff The effective memory operand offset.
7041 */
7042IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7043{
7044 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7045 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7046 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7047 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7048 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7049 iemFpuMaybePopOne(pFpuCtx);
7050}
7051
7052
7053/**
7054 * Worker routine for raising an FPU stack underflow exception.
7055 *
7056 * @param pFpuCtx The FPU context.
7057 * @param iStReg The stack register being accessed.
7058 */
7059IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7060{
7061 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7062 if (pFpuCtx->FCW & X86_FCW_IM)
7063 {
7064 /* Masked underflow. */
7065 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7066 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7067 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7068 if (iStReg != UINT8_MAX)
7069 {
7070 pFpuCtx->FTW |= RT_BIT(iReg);
7071 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7072 }
7073 }
7074 else
7075 {
7076 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7077 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7078 }
7079}
7080
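/*
 * Illustrative sketch (not part of IEM): the two underflow outcomes handled
 * above.  With FCW.IM set the faulting register gets a QNaN and only IE+SF
 * are raised; with IM clear the exception stays pending via the ES and B
 * summary bits.  Bit values are the architectural x87 ones restated locally;
 * the sketch skips the condition-code clearing the real worker also does.
 */
#if 0 /* sketch only, never compiled */
# include <stdbool.h>
# include <stdint.h>

# define SKETCH_FCW_IM   UINT16_C(0x0001)
# define SKETCH_FSW_IE   UINT16_C(0x0001)
# define SKETCH_FSW_SF   UINT16_C(0x0040)
# define SKETCH_FSW_ES   UINT16_C(0x0080)
# define SKETCH_FSW_B    UINT16_C(0x8000)

static uint16_t sketchUnderflowFsw(uint16_t uFcw, uint16_t uFsw, bool *pfStoreQNan)
{
    if (uFcw & SKETCH_FCW_IM)
    {
        *pfStoreQNan = true;                /* masked: substitute a QNaN */
        return (uint16_t)(uFsw | SKETCH_FSW_IE | SKETCH_FSW_SF);
    }
    *pfStoreQNan = false;                   /* unmasked: leave registers, flag it pending */
    return (uint16_t)(uFsw | SKETCH_FSW_IE | SKETCH_FSW_SF | SKETCH_FSW_ES | SKETCH_FSW_B);
}
#endif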
7081
7082/**
7083 * Raises a FPU stack underflow exception.
7084 *
7085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7086 * @param iStReg The destination register that should be loaded
7087 * with QNaN if \#IS is masked. Specify
7088 * UINT8_MAX if none (like for fcom).
7089 */
7090DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7091{
7092 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7093 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7094 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7095 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7096}
7097
7098
7099DECL_NO_INLINE(IEM_STATIC, void)
7100iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7101{
7102 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7103 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7104 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7105 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7106 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7107}
7108
7109
7110DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7111{
7112 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7113 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7114 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7115 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7116 iemFpuMaybePopOne(pFpuCtx);
7117}
7118
7119
7120DECL_NO_INLINE(IEM_STATIC, void)
7121iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7122{
7123 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7124 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7125 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7126 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7127 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7128 iemFpuMaybePopOne(pFpuCtx);
7129}
7130
7131
7132DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7133{
7134 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7135 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7136 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7137 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7138 iemFpuMaybePopOne(pFpuCtx);
7139 iemFpuMaybePopOne(pFpuCtx);
7140}
7141
7142
7143DECL_NO_INLINE(IEM_STATIC, void)
7144iemFpuStackPushUnderflow(PVMCPU pVCpu)
7145{
7146 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7147 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7148 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7149
7150 if (pFpuCtx->FCW & X86_FCW_IM)
7151 {
7152        /* Masked underflow - Push QNaN. */
7153 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7154 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7155 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7156 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7157 pFpuCtx->FTW |= RT_BIT(iNewTop);
7158 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7159 iemFpuRotateStackPush(pFpuCtx);
7160 }
7161 else
7162 {
7163 /* Exception pending - don't change TOP or the register stack. */
7164 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7165 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7166 }
7167}
7168
7169
7170DECL_NO_INLINE(IEM_STATIC, void)
7171iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7172{
7173 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7174 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7175 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7176
7177 if (pFpuCtx->FCW & X86_FCW_IM)
7178 {
7179        /* Masked underflow - Push QNaN. */
7180 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7181 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7182 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7183 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7184 pFpuCtx->FTW |= RT_BIT(iNewTop);
7185 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7186 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7187 iemFpuRotateStackPush(pFpuCtx);
7188 }
7189 else
7190 {
7191 /* Exception pending - don't change TOP or the register stack. */
7192 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7193 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7194 }
7195}
7196
7197
7198/**
7199 * Worker routine for raising an FPU stack overflow exception on a push.
7200 *
7201 * @param pFpuCtx The FPU context.
7202 */
7203IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7204{
7205 if (pFpuCtx->FCW & X86_FCW_IM)
7206 {
7207 /* Masked overflow. */
7208 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7209 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7210 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7211 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7212 pFpuCtx->FTW |= RT_BIT(iNewTop);
7213 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7214 iemFpuRotateStackPush(pFpuCtx);
7215 }
7216 else
7217 {
7218 /* Exception pending - don't change TOP or the register stack. */
7219 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7220 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7221 }
7222}
7223
7224
7225/**
7226 * Raises a FPU stack overflow exception on a push.
7227 *
7228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7229 */
7230DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7231{
7232 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7233 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7234 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7235 iemFpuStackPushOverflowOnly(pFpuCtx);
7236}
7237
7238
7239/**
7240 * Raises a FPU stack overflow exception on a push with a memory operand.
7241 *
7242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7243 * @param iEffSeg The effective memory operand selector register.
7244 * @param GCPtrEff The effective memory operand offset.
7245 */
7246DECL_NO_INLINE(IEM_STATIC, void)
7247iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7248{
7249 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7250 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7251 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7252 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7253 iemFpuStackPushOverflowOnly(pFpuCtx);
7254}
7255
7256
7257IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7258{
7259 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7260 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7261 if (pFpuCtx->FTW & RT_BIT(iReg))
7262 return VINF_SUCCESS;
7263 return VERR_NOT_FOUND;
7264}
7265
7266
7267IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7268{
7269 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7270 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7271 if (pFpuCtx->FTW & RT_BIT(iReg))
7272 {
7273 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7274 return VINF_SUCCESS;
7275 }
7276 return VERR_NOT_FOUND;
7277}
7278
7279
7280IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7281 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7282{
7283 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7284 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7285 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7286 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7287 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7288 {
7289 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7290 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7291 return VINF_SUCCESS;
7292 }
7293 return VERR_NOT_FOUND;
7294}
7295
7296
7297IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7298{
7299 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7300 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7301 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7302 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7303 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7304 {
7305 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7306 return VINF_SUCCESS;
7307 }
7308 return VERR_NOT_FOUND;
7309}
7310
7311
7312/**
7313 * Updates the FPU exception status after FCW is changed.
7314 *
7315 * @param pFpuCtx The FPU context.
7316 */
7317IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7318{
7319 uint16_t u16Fsw = pFpuCtx->FSW;
7320 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7321 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7322 else
7323 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7324 pFpuCtx->FSW = u16Fsw;
7325}
7326
7327
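/*
 * Illustrative sketch (not part of IEM): the summary-bit rule applied above.
 * ES and B are set exactly when at least one exception flag in FSW is not
 * masked by the corresponding FCW bit, otherwise both are cleared.  The
 * sketch only considers the six maskable exception bits (0..5); constants
 * are local restatements, not the X86_* definitions.
 */
#if 0 /* sketch only, never compiled */
# include <stdint.h>

# define SKETCH_XCPT_MASK  UINT16_C(0x003f)   /* IE, DE, ZE, OE, UE, PE */
# define SKETCH_FSW_ES     UINT16_C(0x0080)
# define SKETCH_FSW_B      UINT16_C(0x8000)

static uint16_t sketchRecalcEsAndB(uint16_t uFcw, uint16_t uFsw)
{
    if ((uFsw & SKETCH_XCPT_MASK) & ~(uFcw & SKETCH_XCPT_MASK))
        return (uint16_t)(uFsw | SKETCH_FSW_ES | SKETCH_FSW_B);
    return (uint16_t)(uFsw & ~(SKETCH_FSW_ES | SKETCH_FSW_B));
}
#endif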
7328/**
7329 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7330 *
7331 * @returns The full FTW.
7332 * @param pFpuCtx The FPU context.
7333 */
7334IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7335{
7336 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7337 uint16_t u16Ftw = 0;
7338 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7339 for (unsigned iSt = 0; iSt < 8; iSt++)
7340 {
7341 unsigned const iReg = (iSt + iTop) & 7;
7342 if (!(u8Ftw & RT_BIT(iReg)))
7343 u16Ftw |= 3 << (iReg * 2); /* empty */
7344 else
7345 {
7346 uint16_t uTag;
7347 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7348 if (pr80Reg->s.uExponent == 0x7fff)
7349 uTag = 2; /* Exponent is all 1's => Special. */
7350 else if (pr80Reg->s.uExponent == 0x0000)
7351 {
7352 if (pr80Reg->s.u64Mantissa == 0x0000)
7353 uTag = 1; /* All bits are zero => Zero. */
7354 else
7355 uTag = 2; /* Must be special. */
7356 }
7357 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7358 uTag = 0; /* Valid. */
7359 else
7360 uTag = 2; /* Must be special. */
7361
7362            u16Ftw |= uTag << (iReg * 2);
7363 }
7364 }
7365
7366 return u16Ftw;
7367}
7368
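/*
 * Illustrative sketch (not part of IEM): the per-register tag rule used
 * above, restated for a single 80-bit value.  0 = valid, 1 = zero,
 * 2 = special (NaN/infinity/denormal/unnormal), 3 = empty.  The parameters
 * and field interpretation are local assumptions mirroring the standard
 * 80-bit extended format.
 */
#if 0 /* sketch only, never compiled */
# include <stdbool.h>
# include <stdint.h>

static unsigned sketchCalcTag(bool fEmpty, uint16_t uExponent /* 15 bits */, uint64_t uMantissa)
{
    if (fEmpty)
        return 3;                                   /* empty */
    if (uExponent == 0x7fff)
        return 2;                                   /* NaN or infinity => special */
    if (uExponent == 0)
        return uMantissa == 0 ? 1 /* zero */ : 2 /* denormal => special */;
    if (!(uMantissa & UINT64_C(0x8000000000000000)))
        return 2;                                   /* J bit clear => unnormal => special */
    return 0;                                       /* valid */
}
#endif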
7369
7370/**
7371 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7372 *
7373 * @returns The compressed FTW.
7374 * @param u16FullFtw The full FTW to convert.
7375 */
7376IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7377{
7378 uint8_t u8Ftw = 0;
7379 for (unsigned i = 0; i < 8; i++)
7380 {
7381 if ((u16FullFtw & 3) != 3 /*empty*/)
7382 u8Ftw |= RT_BIT(i);
7383 u16FullFtw >>= 2;
7384 }
7385
7386 return u8Ftw;
7387}
7388
7389/** @} */
7390
7391
7392/** @name Memory access.
7393 *
7394 * @{
7395 */
7396
7397
7398/**
7399 * Updates the IEMCPU::cbWritten counter if applicable.
7400 *
7401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7402 * @param fAccess The access being accounted for.
7403 * @param cbMem The access size.
7404 */
7405DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7406{
7407 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7408 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7409 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7410}
7411
7412
7413/**
7414 * Checks if the given segment can be written to, raising the appropriate
7415 * exception if not.
7416 *
7417 * @returns VBox strict status code.
7418 *
7419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7420 * @param pHid Pointer to the hidden register.
7421 * @param iSegReg The register number.
7422 * @param pu64BaseAddr Where to return the base address to use for the
7423 * segment. (In 64-bit code it may differ from the
7424 * base in the hidden segment.)
7425 */
7426IEM_STATIC VBOXSTRICTRC
7427iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7428{
7429 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7430 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7431 else
7432 {
7433 if (!pHid->Attr.n.u1Present)
7434 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7435
7436 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7437 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7438 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7439 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7440 *pu64BaseAddr = pHid->u64Base;
7441 }
7442 return VINF_SUCCESS;
7443}
7444
7445
7446/**
7447 * Checks if the given segment can be read from, raising the appropriate
7448 * exception if not.
7449 *
7450 * @returns VBox strict status code.
7451 *
7452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7453 * @param pHid Pointer to the hidden register.
7454 * @param iSegReg The register number.
7455 * @param pu64BaseAddr Where to return the base address to use for the
7456 * segment. (In 64-bit code it may differ from the
7457 * base in the hidden segment.)
7458 */
7459IEM_STATIC VBOXSTRICTRC
7460iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7461{
7462 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7463 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7464 else
7465 {
7466 if (!pHid->Attr.n.u1Present)
7467 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7468
7469 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7470 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7471 *pu64BaseAddr = pHid->u64Base;
7472 }
7473 return VINF_SUCCESS;
7474}
7475
7476
7477/**
7478 * Applies the segment limit, base and attributes.
7479 *
7480 * This may raise a \#GP or \#SS.
7481 *
7482 * @returns VBox strict status code.
7483 *
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 * @param fAccess The kind of access which is being performed.
7486 * @param iSegReg The index of the segment register to apply.
7487 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7488 * TSS, ++).
7489 * @param cbMem The access size.
7490 * @param pGCPtrMem Pointer to the guest memory address to apply
7491 * segmentation to. Input and output parameter.
7492 */
7493IEM_STATIC VBOXSTRICTRC
7494iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7495{
7496 if (iSegReg == UINT8_MAX)
7497 return VINF_SUCCESS;
7498
7499 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7500 switch (pVCpu->iem.s.enmCpuMode)
7501 {
7502 case IEMMODE_16BIT:
7503 case IEMMODE_32BIT:
7504 {
7505 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7506 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7507
7508 if ( pSel->Attr.n.u1Present
7509 && !pSel->Attr.n.u1Unusable)
7510 {
7511 Assert(pSel->Attr.n.u1DescType);
7512 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7513 {
7514 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7515 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7516 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7517
7518 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7519 {
7520 /** @todo CPL check. */
7521 }
7522
7523 /*
7524 * There are two kinds of data selectors, normal and expand down.
7525 */
7526 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7527 {
7528 if ( GCPtrFirst32 > pSel->u32Limit
7529 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7530 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7531 }
7532 else
7533 {
7534 /*
7535 * The upper boundary is defined by the B bit, not the G bit!
7536 */
7537 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7538 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7539 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7540 }
7541 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7542 }
7543 else
7544 {
7545
7546 /*
7547 * A code selector can usually be used to read through; writing is
7548 * only permitted in real and V8086 mode.
7549 */
7550 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7551 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7552 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7553 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7554 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7555
7556 if ( GCPtrFirst32 > pSel->u32Limit
7557 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7558 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7559
7560 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7561 {
7562 /** @todo CPL check. */
7563 }
7564
7565 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7566 }
7567 }
7568 else
7569 return iemRaiseGeneralProtectionFault0(pVCpu);
7570 return VINF_SUCCESS;
7571 }
7572
7573 case IEMMODE_64BIT:
7574 {
7575 RTGCPTR GCPtrMem = *pGCPtrMem;
7576 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7577 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7578
7579 Assert(cbMem >= 1);
7580 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7581 return VINF_SUCCESS;
7582 return iemRaiseGeneralProtectionFault0(pVCpu);
7583 }
7584
7585 default:
7586 AssertFailedReturn(VERR_IEM_IPE_7);
7587 }
7588}
7589
7590
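/*
 * Illustrative sketch (not part of IEM): the two data-segment range checks
 * used above.  A normal segment allows offsets [0, limit]; an expand-down
 * segment allows (limit, 0xffff] or (limit, 0xffffffff] depending on the
 * B (default-big) bit.  Self-contained, local names only.
 */
#if 0 /* sketch only, never compiled */
# include <stdbool.h>
# include <stdint.h>

static bool sketchDataSegRangeOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit,
                                 bool fExpandDown, bool fDefBig)
{
    if (!fExpandDown)
        return offFirst <= uLimit && offLast <= uLimit;     /* [0, limit] */
    uint32_t const uUpper = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpper;          /* (limit, upper] */
}
#endif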
7591/**
7592 * Translates a virtual address to a physical address and checks if we
7593 * can access the page as specified.
7594 *
7595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7596 * @param GCPtrMem The virtual address.
7597 * @param fAccess The intended access.
7598 * @param pGCPhysMem Where to return the physical address.
7599 */
7600IEM_STATIC VBOXSTRICTRC
7601iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7602{
7603 /** @todo Need a different PGM interface here. We're currently using
7604 * generic / REM interfaces. This won't cut it for R0 & RC. */
7605 RTGCPHYS GCPhys;
7606 uint64_t fFlags;
7607 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7608 if (RT_FAILURE(rc))
7609 {
7610 /** @todo Check unassigned memory in unpaged mode. */
7611 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7612 *pGCPhysMem = NIL_RTGCPHYS;
7613 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7614 }
7615
7616 /* If the page is writable and does not have the no-exec bit set, all
7617 access is allowed. Otherwise we'll have to check more carefully... */
7618 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7619 {
7620 /* Write to read only memory? */
7621 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7622 && !(fFlags & X86_PTE_RW)
7623 && ( pVCpu->iem.s.uCpl == 3
7624 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7625 {
7626 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7627 *pGCPhysMem = NIL_RTGCPHYS;
7628 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7629 }
7630
7631 /* Kernel memory accessed by userland? */
7632 if ( !(fFlags & X86_PTE_US)
7633 && pVCpu->iem.s.uCpl == 3
7634 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7635 {
7636 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7637 *pGCPhysMem = NIL_RTGCPHYS;
7638 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7639 }
7640
7641 /* Executing non-executable memory? */
7642 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7643 && (fFlags & X86_PTE_PAE_NX)
7644 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7645 {
7646 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7647 *pGCPhysMem = NIL_RTGCPHYS;
7648 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7649 VERR_ACCESS_DENIED);
7650 }
7651 }
7652
7653 /*
7654 * Set the dirty / access flags.
7655 * ASSUMES this is set when the address is translated rather than on commit...
7656 */
7657 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7658 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7659 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7660 {
7661 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7662 AssertRC(rc2);
7663 }
7664
7665 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7666 *pGCPhysMem = GCPhys;
7667 return VINF_SUCCESS;
7668}
7669
7670
7671
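/*
 * Illustrative sketch (not part of IEM): the three protection checks applied
 * above, reduced to booleans.  Returns true when the access should raise #PF.
 * Local names and parameters only; the real code also shapes the page-fault
 * error code from the access type, which the sketch omits.
 */
#if 0 /* sketch only, never compiled */
# include <stdbool.h>

static bool sketchPageAccessFaults(bool fPteWritable, bool fPteUser, bool fPteNoExec,
                                   bool fWrite, bool fExec, unsigned uCpl,
                                   bool fCr0Wp, bool fEferNxe, bool fSysAccess)
{
    if (fWrite && !fPteWritable && (uCpl == 3 || fCr0Wp))
        return true;    /* write to a read-only page */
    if (!fPteUser && uCpl == 3 && !fSysAccess)
        return true;    /* user-mode access to a supervisor page */
    if (fExec && fPteNoExec && fEferNxe)
        return true;    /* instruction fetch from a no-execute page */
    return false;
}
#endif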
7672/**
7673 * Maps a physical page.
7674 *
7675 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7677 * @param GCPhysMem The physical address.
7678 * @param fAccess The intended access.
7679 * @param ppvMem Where to return the mapping address.
7680 * @param pLock The PGM lock.
7681 */
7682IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7683{
7684#ifdef IEM_VERIFICATION_MODE_FULL
7685 /* Force the alternative path so we can ignore writes. */
7686 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7687 {
7688 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7689 {
7690 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7691 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7692 if (RT_FAILURE(rc2))
7693 pVCpu->iem.s.fProblematicMemory = true;
7694 }
7695 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7696 }
7697#endif
7698#ifdef IEM_LOG_MEMORY_WRITES
7699 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7700 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7701#endif
7702#ifdef IEM_VERIFICATION_MODE_MINIMAL
7703 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7704#endif
7705
7706 /** @todo This API may require some improving later. A private deal with PGM
7707 * regarding locking and unlocking needs to be struck. A couple of TLBs
7708 * living in PGM, but with publicly accessible inlined access methods
7709 * could perhaps be an even better solution. */
7710 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7711 GCPhysMem,
7712 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7713 pVCpu->iem.s.fBypassHandlers,
7714 ppvMem,
7715 pLock);
7716 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7717 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7718
7719#ifdef IEM_VERIFICATION_MODE_FULL
7720 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7721 pVCpu->iem.s.fProblematicMemory = true;
7722#endif
7723 return rc;
7724}
7725
7726
7727/**
7728 * Unmap a page previously mapped by iemMemPageMap.
7729 *
7730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7731 * @param GCPhysMem The physical address.
7732 * @param fAccess The intended access.
7733 * @param pvMem What iemMemPageMap returned.
7734 * @param pLock The PGM lock.
7735 */
7736DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7737{
7738 NOREF(pVCpu);
7739 NOREF(GCPhysMem);
7740 NOREF(fAccess);
7741 NOREF(pvMem);
7742 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7743}
7744
7745
7746/**
7747 * Looks up a memory mapping entry.
7748 *
7749 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7751 * @param pvMem The memory address.
7752 * @param fAccess The access flags to match.
7753 */
7754DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7755{
7756 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7757 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7758 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7759 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7760 return 0;
7761 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7762 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7763 return 1;
7764 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7765 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7766 return 2;
7767 return VERR_NOT_FOUND;
7768}
7769
7770
7771/**
7772 * Finds a free memmap entry when using iNextMapping doesn't work.
7773 *
7774 * @returns Memory mapping index, 1024 on failure.
7775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7776 */
7777IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7778{
7779 /*
7780 * The easy case.
7781 */
7782 if (pVCpu->iem.s.cActiveMappings == 0)
7783 {
7784 pVCpu->iem.s.iNextMapping = 1;
7785 return 0;
7786 }
7787
7788 /* There should be enough mappings for all instructions. */
7789 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7790
7791 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7792 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7793 return i;
7794
7795 AssertFailedReturn(1024);
7796}
7797
7798
7799/**
7800 * Commits a bounce buffer that needs writing back and unmaps it.
7801 *
7802 * @returns Strict VBox status code.
7803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7804 * @param iMemMap The index of the buffer to commit.
7805 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7806 * Always false in ring-3, obviously.
7807 */
7808IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7809{
7810 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7811 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7812#ifdef IN_RING3
7813 Assert(!fPostponeFail);
7814 RT_NOREF_PV(fPostponeFail);
7815#endif
7816
7817 /*
7818 * Do the writing.
7819 */
7820#ifndef IEM_VERIFICATION_MODE_MINIMAL
7821 PVM pVM = pVCpu->CTX_SUFF(pVM);
7822 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7823 && !IEM_VERIFICATION_ENABLED(pVCpu))
7824 {
7825 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7826 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7827 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7828 if (!pVCpu->iem.s.fBypassHandlers)
7829 {
7830 /*
7831 * Carefully and efficiently dealing with access handler return
7832 * codes makes this a little bloated.
7833 */
7834 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7835 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7836 pbBuf,
7837 cbFirst,
7838 PGMACCESSORIGIN_IEM);
7839 if (rcStrict == VINF_SUCCESS)
7840 {
7841 if (cbSecond)
7842 {
7843 rcStrict = PGMPhysWrite(pVM,
7844 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7845 pbBuf + cbFirst,
7846 cbSecond,
7847 PGMACCESSORIGIN_IEM);
7848 if (rcStrict == VINF_SUCCESS)
7849 { /* nothing */ }
7850 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7851 {
7852 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7853 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7855 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7856 }
7857# ifndef IN_RING3
7858 else if (fPostponeFail)
7859 {
7860 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7861 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7863 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7864 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7865 return iemSetPassUpStatus(pVCpu, rcStrict);
7866 }
7867# endif
7868 else
7869 {
7870 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7873 return rcStrict;
7874 }
7875 }
7876 }
7877 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7878 {
7879 if (!cbSecond)
7880 {
7881 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7882 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7883 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7884 }
7885 else
7886 {
7887 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7889 pbBuf + cbFirst,
7890 cbSecond,
7891 PGMACCESSORIGIN_IEM);
7892 if (rcStrict2 == VINF_SUCCESS)
7893 {
7894 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7896 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7897 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7898 }
7899 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7900 {
7901 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7902 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7903 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7904 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7905 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7906 }
7907# ifndef IN_RING3
7908 else if (fPostponeFail)
7909 {
7910 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7912 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7913 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7914 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7915 return iemSetPassUpStatus(pVCpu, rcStrict);
7916 }
7917# endif
7918 else
7919 {
7920 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7922 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7923 return rcStrict2;
7924 }
7925 }
7926 }
7927# ifndef IN_RING3
7928 else if (fPostponeFail)
7929 {
7930 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7933 if (!cbSecond)
7934 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7935 else
7936 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7937 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7938 return iemSetPassUpStatus(pVCpu, rcStrict);
7939 }
7940# endif
7941 else
7942 {
7943 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7944 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7945 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7946 return rcStrict;
7947 }
7948 }
7949 else
7950 {
7951 /*
7952 * No access handlers, much simpler.
7953 */
7954 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7955 if (RT_SUCCESS(rc))
7956 {
7957 if (cbSecond)
7958 {
7959 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7960 if (RT_SUCCESS(rc))
7961 { /* likely */ }
7962 else
7963 {
7964 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7965 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7966 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7967 return rc;
7968 }
7969 }
7970 }
7971 else
7972 {
7973 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7974 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7975 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7976 return rc;
7977 }
7978 }
7979 }
7980#endif
7981
7982#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7983 /*
7984 * Record the write(s).
7985 */
7986 if (!pVCpu->iem.s.fNoRem)
7987 {
7988 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7989 if (pEvtRec)
7990 {
7991 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7992 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7993 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7994 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7995 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7996 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7997 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7998 }
7999 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8000 {
8001 pEvtRec = iemVerifyAllocRecord(pVCpu);
8002 if (pEvtRec)
8003 {
8004 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8005 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8006 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8007 memcpy(pEvtRec->u.RamWrite.ab,
8008 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8009 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8010 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8011 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8012 }
8013 }
8014 }
8015#endif
8016#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8017 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8018 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8019 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8020 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8021 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8022 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8023
8024 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8025 g_cbIemWrote = cbWrote;
8026 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8027#endif
8028
8029 /*
8030 * Free the mapping entry.
8031 */
8032 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8033 Assert(pVCpu->iem.s.cActiveMappings != 0);
8034 pVCpu->iem.s.cActiveMappings--;
8035 return VINF_SUCCESS;
8036}
8037
8038
8039/**
8040 * iemMemMap worker that deals with a request crossing pages.
8041 */
8042IEM_STATIC VBOXSTRICTRC
8043iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8044{
8045 /*
8046 * Do the address translations.
8047 */
8048 RTGCPHYS GCPhysFirst;
8049 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8050 if (rcStrict != VINF_SUCCESS)
8051 return rcStrict;
8052
8053 RTGCPHYS GCPhysSecond;
8054 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8055 fAccess, &GCPhysSecond);
8056 if (rcStrict != VINF_SUCCESS)
8057 return rcStrict;
8058 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8059
8060 PVM pVM = pVCpu->CTX_SUFF(pVM);
8061#ifdef IEM_VERIFICATION_MODE_FULL
8062 /*
8063 * Detect problematic memory when verifying so we can select
8064 * the right execution engine. (TLB: Redo this.)
8065 */
8066 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8067 {
8068 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8069 if (RT_SUCCESS(rc2))
8070 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8071 if (RT_FAILURE(rc2))
8072 pVCpu->iem.s.fProblematicMemory = true;
8073 }
8074#endif
8075
8076
8077 /*
8078 * Read in the current memory content if it's a read, execute or partial
8079 * write access.
8080 */
8081 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8082 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8083 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8084
8085 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8086 {
8087 if (!pVCpu->iem.s.fBypassHandlers)
8088 {
8089 /*
8090 * Must carefully deal with access handler status codes here,
8091 * makes the code a bit bloated.
8092 */
8093 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8094 if (rcStrict == VINF_SUCCESS)
8095 {
8096 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8097 if (rcStrict == VINF_SUCCESS)
8098 { /*likely */ }
8099 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8100 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8101 else
8102 {
8103                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8104                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8105 return rcStrict;
8106 }
8107 }
8108 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8109 {
8110 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8111 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8112 {
8113 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8114 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8115 }
8116 else
8117 {
8118                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8119                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8120 return rcStrict2;
8121 }
8122 }
8123 else
8124 {
8125                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8126 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8127 return rcStrict;
8128 }
8129 }
8130 else
8131 {
8132 /*
8133             * No informational status codes here, much more straightforward.
8134 */
8135 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8136 if (RT_SUCCESS(rc))
8137 {
8138 Assert(rc == VINF_SUCCESS);
8139 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8140 if (RT_SUCCESS(rc))
8141 Assert(rc == VINF_SUCCESS);
8142 else
8143 {
8144                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8145 return rc;
8146 }
8147 }
8148 else
8149 {
8150                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8151 return rc;
8152 }
8153 }
8154
8155#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8156 if ( !pVCpu->iem.s.fNoRem
8157 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8158 {
8159 /*
8160 * Record the reads.
8161 */
8162 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8163 if (pEvtRec)
8164 {
8165 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8166 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8167 pEvtRec->u.RamRead.cb = cbFirstPage;
8168 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8169 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8170 }
8171 pEvtRec = iemVerifyAllocRecord(pVCpu);
8172 if (pEvtRec)
8173 {
8174 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8175 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8176 pEvtRec->u.RamRead.cb = cbSecondPage;
8177 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8178 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8179 }
8180 }
8181#endif
8182 }
8183#ifdef VBOX_STRICT
8184 else
8185 memset(pbBuf, 0xcc, cbMem);
8186 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8187 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8188#endif
8189
8190 /*
8191 * Commit the bounce buffer entry.
8192 */
8193 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8194 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8195 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8196 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8197 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8198 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8199 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8200 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8201 pVCpu->iem.s.cActiveMappings++;
8202
8203 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8204 *ppvMem = pbBuf;
8205 return VINF_SUCCESS;
8206}
8207
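/*
 * Illustrative sketch (not part of the original source): how the function above
 * splits a cross-page access.  Assume PAGE_SIZE is 0x1000 and a 4 byte read at
 * GCPtrFirst = 0x00081ffe that translates to GCPhysFirst = 0x00054ffe (both
 * addresses are made up for the example):
 *
 *      cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK)  ->  0x1000 - 0xffe = 2
 *      cbSecondPage = cbMem - cbFirstPage                           ->  4 - 2          = 2
 *
 * The first two bytes come from GCPhysFirst, the remaining two from GCPhysSecond,
 * i.e. the translation of (GCPtrFirst + cbMem - 1) with the page offset masked off,
 * and both halves end up back to back in aBounceBuffers[iMemMap].ab[].
 */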
8208
8209/**
8210 * iemMemMap worker that deals with iemMemPageMap failures.
8211 */
8212IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8213 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8214{
8215 /*
8216 * Filter out conditions we can handle and the ones which shouldn't happen.
8217 */
8218 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8219 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8220 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8221 {
8222 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8223 return rcMap;
8224 }
8225 pVCpu->iem.s.cPotentialExits++;
8226
8227 /*
8228 * Read in the current memory content if it's a read, execute or partial
8229 * write access.
8230 */
8231 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8232 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8233 {
8234 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8235 memset(pbBuf, 0xff, cbMem);
8236 else
8237 {
8238 int rc;
8239 if (!pVCpu->iem.s.fBypassHandlers)
8240 {
8241 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8242 if (rcStrict == VINF_SUCCESS)
8243 { /* nothing */ }
8244 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8245 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8246 else
8247 {
8248 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8249 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8250 return rcStrict;
8251 }
8252 }
8253 else
8254 {
8255 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8256 if (RT_SUCCESS(rc))
8257 { /* likely */ }
8258 else
8259 {
8260 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8261 GCPhysFirst, rc));
8262 return rc;
8263 }
8264 }
8265 }
8266
8267#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8268 if ( !pVCpu->iem.s.fNoRem
8269 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8270 {
8271 /*
8272 * Record the read.
8273 */
8274 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8275 if (pEvtRec)
8276 {
8277 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8278 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8279 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8280 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8281 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8282 }
8283 }
8284#endif
8285 }
8286#ifdef VBOX_STRICT
8287 else
8288 memset(pbBuf, 0xcc, cbMem);
8289#endif
8290#ifdef VBOX_STRICT
8291 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8292 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8293#endif
8294
8295 /*
8296 * Commit the bounce buffer entry.
8297 */
8298 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8299 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8300 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8301 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8302 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8303 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8304 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8305 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8306 pVCpu->iem.s.cActiveMappings++;
8307
8308 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8309 *ppvMem = pbBuf;
8310 return VINF_SUCCESS;
8311}
8312
8313
8314
8315/**
8316 * Maps the specified guest memory for the given kind of access.
8317 *
8318 * This may be using bounce buffering of the memory if it's crossing a page
8319 * boundary or if there is an access handler installed for any of it. Because
8320 * of lock prefix guarantees, we're in for some extra clutter when this
8321 * happens.
8322 *
8323 * This may raise a \#GP, \#SS, \#PF or \#AC.
8324 *
8325 * @returns VBox strict status code.
8326 *
8327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8328 * @param ppvMem Where to return the pointer to the mapped
8329 * memory.
8330 * @param cbMem The number of bytes to map. This is usually 1,
8331 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8332 * string operations it can be up to a page.
8333 * @param iSegReg The index of the segment register to use for
8334 * this access. The base and limits are checked.
8335 * Use UINT8_MAX to indicate that no segmentation
8336 * is required (for IDT, GDT and LDT accesses).
8337 * @param GCPtrMem The address of the guest memory.
8338 * @param fAccess How the memory is being accessed. The
8339 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8340 * how to map the memory, while the
8341 * IEM_ACCESS_WHAT_XXX bit is used when raising
8342 * exceptions.
8343 */
8344IEM_STATIC VBOXSTRICTRC
8345iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8346{
8347 /*
8348 * Check the input and figure out which mapping entry to use.
8349 */
8350 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8351    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8352 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8353
8354 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8355 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8356 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8357 {
8358 iMemMap = iemMemMapFindFree(pVCpu);
8359 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8360 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8361 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8362 pVCpu->iem.s.aMemMappings[2].fAccess),
8363 VERR_IEM_IPE_9);
8364 }
8365
8366 /*
8367 * Map the memory, checking that we can actually access it. If something
8368 * slightly complicated happens, fall back on bounce buffering.
8369 */
8370 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8371 if (rcStrict != VINF_SUCCESS)
8372 return rcStrict;
8373
8374 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8375 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8376
8377 RTGCPHYS GCPhysFirst;
8378 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8379 if (rcStrict != VINF_SUCCESS)
8380 return rcStrict;
8381
8382 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8383 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8384 if (fAccess & IEM_ACCESS_TYPE_READ)
8385 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8386
8387 void *pvMem;
8388 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8389 if (rcStrict != VINF_SUCCESS)
8390 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8391
8392 /*
8393 * Fill in the mapping table entry.
8394 */
8395 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8396 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8397 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8398 pVCpu->iem.s.cActiveMappings++;
8399
8400 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8401 *ppvMem = pvMem;
8402 return VINF_SUCCESS;
8403}
8404
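/*
 * Typical calling pattern for iemMemMap / iemMemCommitAndUnmap (sketch only; the
 * data fetch and store helpers further down follow exactly this shape):
 *
 *      uint16_t *pu16Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu16Dst = u16Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 *
 * Whether pu16Dst points straight into the guest page or into a bounce buffer is
 * transparent to the caller; the commit call writes any bounce buffer back.
 */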
8405
8406/**
8407 * Commits the guest memory if bounce buffered and unmaps it.
8408 *
8409 * @returns Strict VBox status code.
8410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8411 * @param pvMem The mapping.
8412 * @param fAccess The kind of access.
8413 */
8414IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8415{
8416 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8417 AssertReturn(iMemMap >= 0, iMemMap);
8418
8419 /* If it's bounce buffered, we may need to write back the buffer. */
8420 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8421 {
8422 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8423 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8424 }
8425 /* Otherwise unlock it. */
8426 else
8427 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8428
8429 /* Free the entry. */
8430 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8431 Assert(pVCpu->iem.s.cActiveMappings != 0);
8432 pVCpu->iem.s.cActiveMappings--;
8433 return VINF_SUCCESS;
8434}
8435
8436#ifdef IEM_WITH_SETJMP
8437
8438/**
8439 * Maps the specified guest memory for the given kind of access, longjmp on
8440 * error.
8441 *
8442 * This may be using bounce buffering of the memory if it's crossing a page
8443 * boundary or if there is an access handler installed for any of it. Because
8444 * of lock prefix guarantees, we're in for some extra clutter when this
8445 * happens.
8446 *
8447 * This may raise a \#GP, \#SS, \#PF or \#AC.
8448 *
8449 * @returns Pointer to the mapped memory.
8450 *
8451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8452 * @param cbMem The number of bytes to map. This is usually 1,
8453 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8454 * string operations it can be up to a page.
8455 * @param iSegReg The index of the segment register to use for
8456 * this access. The base and limits are checked.
8457 * Use UINT8_MAX to indicate that no segmentation
8458 * is required (for IDT, GDT and LDT accesses).
8459 * @param GCPtrMem The address of the guest memory.
8460 * @param fAccess How the memory is being accessed. The
8461 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8462 * how to map the memory, while the
8463 * IEM_ACCESS_WHAT_XXX bit is used when raising
8464 * exceptions.
8465 */
8466IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8467{
8468 /*
8469 * Check the input and figure out which mapping entry to use.
8470 */
8471 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8472    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8473 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8474
8475 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8476 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8477 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8478 {
8479 iMemMap = iemMemMapFindFree(pVCpu);
8480 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8481 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8482 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8483 pVCpu->iem.s.aMemMappings[2].fAccess),
8484 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8485 }
8486
8487 /*
8488 * Map the memory, checking that we can actually access it. If something
8489 * slightly complicated happens, fall back on bounce buffering.
8490 */
8491 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8492 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8493 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8494
8495 /* Crossing a page boundary? */
8496 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8497 { /* No (likely). */ }
8498 else
8499 {
8500 void *pvMem;
8501 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8502 if (rcStrict == VINF_SUCCESS)
8503 return pvMem;
8504 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8505 }
8506
8507 RTGCPHYS GCPhysFirst;
8508 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8509 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8510 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8511
8512 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8513 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8514 if (fAccess & IEM_ACCESS_TYPE_READ)
8515 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8516
8517 void *pvMem;
8518 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8519 if (rcStrict == VINF_SUCCESS)
8520 { /* likely */ }
8521 else
8522 {
8523 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8524 if (rcStrict == VINF_SUCCESS)
8525 return pvMem;
8526 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8527 }
8528
8529 /*
8530 * Fill in the mapping table entry.
8531 */
8532 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8533 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8534 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8535 pVCpu->iem.s.cActiveMappings++;
8536
8537 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8538 return pvMem;
8539}
8540
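/*
 * Sketch of the longjmp-style calling pattern (illustration only).  Unlike iemMemMap,
 * failures never come back as a status code; they longjmp out through
 * pVCpu->iem.s.CTX_SUFF(pJmpBuf), so callers just dereference the returned pointer:
 *
 *      uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst),
 *                                                   X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *      *pu16Dst = u16Value;
 *      iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 */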
8541
8542/**
8543 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8544 *
8545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8546 * @param pvMem The mapping.
8547 * @param fAccess The kind of access.
8548 */
8549IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8550{
8551 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8552 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8553
8554 /* If it's bounce buffered, we may need to write back the buffer. */
8555 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8556 {
8557 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8558 {
8559 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8560 if (rcStrict == VINF_SUCCESS)
8561 return;
8562 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8563 }
8564 }
8565 /* Otherwise unlock it. */
8566 else
8567 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8568
8569 /* Free the entry. */
8570 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8571 Assert(pVCpu->iem.s.cActiveMappings != 0);
8572 pVCpu->iem.s.cActiveMappings--;
8573}
8574
8575#endif
8576
8577#ifndef IN_RING3
8578/**
8579 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
8580 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8581 * buffer part shows trouble the commit will be postponed to ring-3 (sets FF and stuff).
8582 * Allows the instruction to be completed and retired, while the IEM user will
8583 * return to ring-3 immediately afterwards and do the postponed writes there.
8584 *
8585 * @returns VBox status code (no strict statuses). Caller must check
8586 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8588 * @param pvMem The mapping.
8589 * @param fAccess The kind of access.
8590 */
8591IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8592{
8593 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8594 AssertReturn(iMemMap >= 0, iMemMap);
8595
8596 /* If it's bounce buffered, we may need to write back the buffer. */
8597 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8598 {
8599 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8600 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8601 }
8602 /* Otherwise unlock it. */
8603 else
8604 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8605
8606 /* Free the entry. */
8607 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8608 Assert(pVCpu->iem.s.cActiveMappings != 0);
8609 pVCpu->iem.s.cActiveMappings--;
8610 return VINF_SUCCESS;
8611}
8612#endif
8613
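/*
 * Sketch of how the postponing variant above is meant to be used by ring-0/raw-mode
 * string code (illustration only, not taken from the source):
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
 *      // The instruction can be retired now, but if the commit hit an access handler
 *      // the write is still pending, so don't repeat the string operation here:
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          return rcStrict;  // drop to ring-3 where the postponed write is performed
 */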
8614
8615/**
8616 * Rolls back mappings, releasing page locks and such.
8617 *
8618 * The caller shall only call this after checking cActiveMappings.
8619 *
8621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8622 */
8623IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8624{
8625 Assert(pVCpu->iem.s.cActiveMappings > 0);
8626
8627 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8628 while (iMemMap-- > 0)
8629 {
8630 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8631 if (fAccess != IEM_ACCESS_INVALID)
8632 {
8633 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8634 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8635 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8636 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8637 Assert(pVCpu->iem.s.cActiveMappings > 0);
8638 pVCpu->iem.s.cActiveMappings--;
8639 }
8640 }
8641}
8642
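/*
 * Sketch of how iemMemRollback above is intended to be used (illustration only;
 * iemExecuteOneWorker is a hypothetical stand-in for the decode + execute step):
 *
 *      VBOXSTRICTRC rcStrict = iemExecuteOneWorker(pVCpu);
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);   // drop any mappings the failed instruction left behind
 */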
8643
8644/**
8645 * Fetches a data byte.
8646 *
8647 * @returns Strict VBox status code.
8648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8649 * @param pu8Dst Where to return the byte.
8650 * @param iSegReg The index of the segment register to use for
8651 * this access. The base and limits are checked.
8652 * @param GCPtrMem The address of the guest memory.
8653 */
8654IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8655{
8656 /* The lazy approach for now... */
8657 uint8_t const *pu8Src;
8658 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8659 if (rc == VINF_SUCCESS)
8660 {
8661 *pu8Dst = *pu8Src;
8662 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8663 }
8664 return rc;
8665}
8666
8667
8668#ifdef IEM_WITH_SETJMP
8669/**
8670 * Fetches a data byte, longjmp on error.
8671 *
8672 * @returns The byte.
8673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8674 * @param iSegReg The index of the segment register to use for
8675 * this access. The base and limits are checked.
8676 * @param GCPtrMem The address of the guest memory.
8677 */
8678DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8679{
8680 /* The lazy approach for now... */
8681 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8682 uint8_t const bRet = *pu8Src;
8683 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8684 return bRet;
8685}
8686#endif /* IEM_WITH_SETJMP */
8687
8688
8689/**
8690 * Fetches a data word.
8691 *
8692 * @returns Strict VBox status code.
8693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8694 * @param pu16Dst Where to return the word.
8695 * @param iSegReg The index of the segment register to use for
8696 * this access. The base and limits are checked.
8697 * @param GCPtrMem The address of the guest memory.
8698 */
8699IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8700{
8701 /* The lazy approach for now... */
8702 uint16_t const *pu16Src;
8703 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8704 if (rc == VINF_SUCCESS)
8705 {
8706 *pu16Dst = *pu16Src;
8707 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8708 }
8709 return rc;
8710}
8711
8712
8713#ifdef IEM_WITH_SETJMP
8714/**
8715 * Fetches a data word, longjmp on error.
8716 *
8717 * @returns The word
8718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8719 * @param iSegReg The index of the segment register to use for
8720 * this access. The base and limits are checked.
8721 * @param GCPtrMem The address of the guest memory.
8722 */
8723DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8724{
8725 /* The lazy approach for now... */
8726 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8727 uint16_t const u16Ret = *pu16Src;
8728 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8729 return u16Ret;
8730}
8731#endif
8732
8733
8734/**
8735 * Fetches a data dword.
8736 *
8737 * @returns Strict VBox status code.
8738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8739 * @param pu32Dst Where to return the dword.
8740 * @param iSegReg The index of the segment register to use for
8741 * this access. The base and limits are checked.
8742 * @param GCPtrMem The address of the guest memory.
8743 */
8744IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8745{
8746 /* The lazy approach for now... */
8747 uint32_t const *pu32Src;
8748 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8749 if (rc == VINF_SUCCESS)
8750 {
8751 *pu32Dst = *pu32Src;
8752 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8753 }
8754 return rc;
8755}
8756
8757
8758#ifdef IEM_WITH_SETJMP
8759
8760IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8761{
8762 Assert(cbMem >= 1);
8763 Assert(iSegReg < X86_SREG_COUNT);
8764
8765 /*
8766 * 64-bit mode is simpler.
8767 */
8768 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8769 {
8770 if (iSegReg >= X86_SREG_FS)
8771 {
8772 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8773 GCPtrMem += pSel->u64Base;
8774 }
8775
8776 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8777 return GCPtrMem;
8778 }
8779 /*
8780 * 16-bit and 32-bit segmentation.
8781 */
8782 else
8783 {
8784 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8785 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8786 == X86DESCATTR_P /* data, expand up */
8787 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8788 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8789 {
8790 /* expand up */
8791            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8792            if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
8793                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
8794 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8795 }
8796 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8797 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8798 {
8799 /* expand down */
8800 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8801 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8802 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8803 && GCPtrLast32 > (uint32_t)GCPtrMem))
8804 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8805 }
8806 else
8807 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8808 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8809 }
8810 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8811}
8812
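/*
 * Worked example for the expand-up/expand-down limit checks in the two segment
 * helpers here (illustration only).  Take a 4 byte access at offset 0x0ffe:
 *
 *   - Expand-up data segment, u32Limit = 0x0fff:
 *         last byte = 0x0ffe + 4 - 1 = 0x1001 > 0x0fff  ->  bounds violation.
 *     With u32Limit >= 0x1001 the access fits and (base + offset) is returned.
 *
 *   - Expand-down data segment, u32Limit = 0x0fff, D/B = 1:
 *     valid offsets are u32Limit+1 .. 0xffffffff, so offset 0x0ffe is rejected
 *     while the same access at offset 0x1000 is accepted.
 */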
8813
8814IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8815{
8816 Assert(cbMem >= 1);
8817 Assert(iSegReg < X86_SREG_COUNT);
8818
8819 /*
8820 * 64-bit mode is simpler.
8821 */
8822 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8823 {
8824 if (iSegReg >= X86_SREG_FS)
8825 {
8826 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8827 GCPtrMem += pSel->u64Base;
8828 }
8829
8830 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8831 return GCPtrMem;
8832 }
8833 /*
8834 * 16-bit and 32-bit segmentation.
8835 */
8836 else
8837 {
8838 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8839 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8840 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8841 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8842 {
8843 /* expand up */
8844            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8845            if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
8846                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
8847 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8848 }
8849        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8850 {
8851 /* expand down */
8852 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8853 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8854 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8855 && GCPtrLast32 > (uint32_t)GCPtrMem))
8856 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8857 }
8858 else
8859 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8860 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8861 }
8862 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8863}
8864
8865
8866/**
8867 * Fetches a data dword, longjmp on error, fallback/safe version.
8868 *
8869 * @returns The dword
8870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8871 * @param iSegReg The index of the segment register to use for
8872 * this access. The base and limits are checked.
8873 * @param GCPtrMem The address of the guest memory.
8874 */
8875IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8876{
8877 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8878 uint32_t const u32Ret = *pu32Src;
8879 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8880 return u32Ret;
8881}
8882
8883
8884/**
8885 * Fetches a data dword, longjmp on error.
8886 *
8887 * @returns The dword
8888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8889 * @param iSegReg The index of the segment register to use for
8890 * this access. The base and limits are checked.
8891 * @param GCPtrMem The address of the guest memory.
8892 */
8893DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8894{
8895# ifdef IEM_WITH_DATA_TLB
8896 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8897 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8898 {
8899 /// @todo more later.
8900 }
8901
8902 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8903# else
8904 /* The lazy approach. */
8905 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8906 uint32_t const u32Ret = *pu32Src;
8907 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8908 return u32Ret;
8909# endif
8910}
8911#endif
8912
8913
8914#ifdef SOME_UNUSED_FUNCTION
8915/**
8916 * Fetches a data dword and sign extends it to a qword.
8917 *
8918 * @returns Strict VBox status code.
8919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8920 * @param pu64Dst Where to return the sign extended value.
8921 * @param iSegReg The index of the segment register to use for
8922 * this access. The base and limits are checked.
8923 * @param GCPtrMem The address of the guest memory.
8924 */
8925IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8926{
8927 /* The lazy approach for now... */
8928 int32_t const *pi32Src;
8929 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8930 if (rc == VINF_SUCCESS)
8931 {
8932 *pu64Dst = *pi32Src;
8933 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8934 }
8935#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8936 else
8937 *pu64Dst = 0;
8938#endif
8939 return rc;
8940}
8941#endif
8942
8943
8944/**
8945 * Fetches a data qword.
8946 *
8947 * @returns Strict VBox status code.
8948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8949 * @param pu64Dst Where to return the qword.
8950 * @param iSegReg The index of the segment register to use for
8951 * this access. The base and limits are checked.
8952 * @param GCPtrMem The address of the guest memory.
8953 */
8954IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8955{
8956 /* The lazy approach for now... */
8957 uint64_t const *pu64Src;
8958 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8959 if (rc == VINF_SUCCESS)
8960 {
8961 *pu64Dst = *pu64Src;
8962 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8963 }
8964 return rc;
8965}
8966
8967
8968#ifdef IEM_WITH_SETJMP
8969/**
8970 * Fetches a data qword, longjmp on error.
8971 *
8972 * @returns The qword.
8973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8974 * @param iSegReg The index of the segment register to use for
8975 * this access. The base and limits are checked.
8976 * @param GCPtrMem The address of the guest memory.
8977 */
8978DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8979{
8980 /* The lazy approach for now... */
8981 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8982 uint64_t const u64Ret = *pu64Src;
8983 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8984 return u64Ret;
8985}
8986#endif
8987
8988
8989/**
8990 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8991 *
8992 * @returns Strict VBox status code.
8993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8994 * @param pu64Dst Where to return the qword.
8995 * @param iSegReg The index of the segment register to use for
8996 * this access. The base and limits are checked.
8997 * @param GCPtrMem The address of the guest memory.
8998 */
8999IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9000{
9001 /* The lazy approach for now... */
9002 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9003 if (RT_UNLIKELY(GCPtrMem & 15))
9004 return iemRaiseGeneralProtectionFault0(pVCpu);
9005
9006 uint64_t const *pu64Src;
9007 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9008 if (rc == VINF_SUCCESS)
9009 {
9010 *pu64Dst = *pu64Src;
9011 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9012 }
9013 return rc;
9014}
9015
9016
9017#ifdef IEM_WITH_SETJMP
9018/**
9019 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9020 *
9021 * @returns The qword.
9022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9023 * @param iSegReg The index of the segment register to use for
9024 * this access. The base and limits are checked.
9025 * @param GCPtrMem The address of the guest memory.
9026 */
9027DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9028{
9029 /* The lazy approach for now... */
9030 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9031 if (RT_LIKELY(!(GCPtrMem & 15)))
9032 {
9033 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9034 uint64_t const u64Ret = *pu64Src;
9035 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9036 return u64Ret;
9037 }
9038
9039 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9040 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9041}
9042#endif
9043
9044
9045/**
9046 * Fetches a data tword.
9047 *
9048 * @returns Strict VBox status code.
9049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9050 * @param pr80Dst Where to return the tword.
9051 * @param iSegReg The index of the segment register to use for
9052 * this access. The base and limits are checked.
9053 * @param GCPtrMem The address of the guest memory.
9054 */
9055IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9056{
9057 /* The lazy approach for now... */
9058 PCRTFLOAT80U pr80Src;
9059 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9060 if (rc == VINF_SUCCESS)
9061 {
9062 *pr80Dst = *pr80Src;
9063 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9064 }
9065 return rc;
9066}
9067
9068
9069#ifdef IEM_WITH_SETJMP
9070/**
9071 * Fetches a data tword, longjmp on error.
9072 *
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param pr80Dst Where to return the tword.
9075 * @param iSegReg The index of the segment register to use for
9076 * this access. The base and limits are checked.
9077 * @param GCPtrMem The address of the guest memory.
9078 */
9079DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9080{
9081 /* The lazy approach for now... */
9082 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9083 *pr80Dst = *pr80Src;
9084 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9085}
9086#endif
9087
9088
9089/**
9090 * Fetches a data dqword (double qword), generally SSE related.
9091 *
9092 * @returns Strict VBox status code.
9093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9094 * @param   pu128Dst            Where to return the dqword.
9095 * @param iSegReg The index of the segment register to use for
9096 * this access. The base and limits are checked.
9097 * @param GCPtrMem The address of the guest memory.
9098 */
9099IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9100{
9101 /* The lazy approach for now... */
9102 uint128_t const *pu128Src;
9103 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9104 if (rc == VINF_SUCCESS)
9105 {
9106 *pu128Dst = *pu128Src;
9107 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9108 }
9109 return rc;
9110}
9111
9112
9113#ifdef IEM_WITH_SETJMP
9114/**
9115 * Fetches a data dqword (double qword), generally SSE related.
9116 *
9117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9118 * @param   pu128Dst            Where to return the dqword.
9119 * @param iSegReg The index of the segment register to use for
9120 * this access. The base and limits are checked.
9121 * @param GCPtrMem The address of the guest memory.
9122 */
9123IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9124{
9125 /* The lazy approach for now... */
9126 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9127 *pu128Dst = *pu128Src;
9128 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9129}
9130#endif
9131
9132
9133/**
9134 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9135 * related.
9136 *
9137 * Raises \#GP(0) if not aligned.
9138 *
9139 * @returns Strict VBox status code.
9140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9141 * @param   pu128Dst            Where to return the dqword.
9142 * @param iSegReg The index of the segment register to use for
9143 * this access. The base and limits are checked.
9144 * @param GCPtrMem The address of the guest memory.
9145 */
9146IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9147{
9148 /* The lazy approach for now... */
9149 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9150 if ( (GCPtrMem & 15)
9151 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9152 return iemRaiseGeneralProtectionFault0(pVCpu);
9153
9154 uint128_t const *pu128Src;
9155 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9156 if (rc == VINF_SUCCESS)
9157 {
9158 *pu128Dst = *pu128Src;
9159 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9160 }
9161 return rc;
9162}
9163
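/*
 * Quick illustration of the SSE alignment rule enforced above (sketch, not from the
 * source): a 16 byte load at GCPtrMem = 0x2008 has (GCPtrMem & 15) = 8 and therefore
 * raises #GP(0), unless the MXCSR.MM misaligned-SSE mode bit is set; the same load at
 * GCPtrMem = 0x2010 is aligned and takes the normal mapping path.
 */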
9164
9165#ifdef IEM_WITH_SETJMP
9166/**
9167 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9168 * related, longjmp on error.
9169 *
9170 * Raises \#GP(0) if not aligned.
9171 *
9172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9173 * @param   pu128Dst            Where to return the dqword.
9174 * @param iSegReg The index of the segment register to use for
9175 * this access. The base and limits are checked.
9176 * @param GCPtrMem The address of the guest memory.
9177 */
9178DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9179{
9180 /* The lazy approach for now... */
9181 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9182 if ( (GCPtrMem & 15) == 0
9183 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9184 {
9185 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9186 IEM_ACCESS_DATA_R);
9187 *pu128Dst = *pu128Src;
9188 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9189 return;
9190 }
9191
9192 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9193 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9194}
9195#endif
9196
9197
9198
9199/**
9200 * Fetches a descriptor register (lgdt, lidt).
9201 *
9202 * @returns Strict VBox status code.
9203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9204 * @param pcbLimit Where to return the limit.
9205 * @param pGCPtrBase Where to return the base.
9206 * @param iSegReg The index of the segment register to use for
9207 * this access. The base and limits are checked.
9208 * @param GCPtrMem The address of the guest memory.
9209 * @param enmOpSize The effective operand size.
9210 */
9211IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9212 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9213{
9214 /*
9215 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9216 * little special:
9217 * - The two reads are done separately.
9218 * - Operand size override works in 16-bit and 32-bit code, but 64-bit.
9219 *   - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9220 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9221 * don't try emulate this eccentric behavior, because it's not well
9222 *     don't try to emulate this eccentric behavior, because it's not well
9223 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9224 */
9225 VBOXSTRICTRC rcStrict;
9226 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9227 {
9228 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9229 if (rcStrict == VINF_SUCCESS)
9230 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9231 }
9232 else
9233 {
9234 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
9235 if (enmOpSize == IEMMODE_32BIT)
9236 {
9237 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9238 {
9239 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9240 if (rcStrict == VINF_SUCCESS)
9241 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9242 }
9243 else
9244 {
9245 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9246 if (rcStrict == VINF_SUCCESS)
9247 {
9248 *pcbLimit = (uint16_t)uTmp;
9249 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9250 }
9251 }
9252 if (rcStrict == VINF_SUCCESS)
9253 *pGCPtrBase = uTmp;
9254 }
9255 else
9256 {
9257 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9258 if (rcStrict == VINF_SUCCESS)
9259 {
9260 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9261 if (rcStrict == VINF_SUCCESS)
9262 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9263 }
9264 }
9265 }
9266 return rcStrict;
9267}
9268
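/*
 * Worked example of the LGDT/LIDT operand layout read above (illustration only).
 * Assume the bytes at GCPtrMem are: ff 03 56 34 12 80 ...
 *
 *   - 16-bit operand size:  limit = 0x03ff, base = 0x80123456 & 0x00ffffff = 0x00123456
 *                           (the top byte of the dword at offset 2 is ignored).
 *   - 32-bit operand size:  limit = 0x03ff, base = 0x80123456 (full dword at offset 2).
 *   - 64-bit mode:          limit is still the word at offset 0, but the base is the
 *                           full qword at offset 2, i.e. 10 bytes are fetched in all.
 */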
9269
9270
9271/**
9272 * Stores a data byte.
9273 *
9274 * @returns Strict VBox status code.
9275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9276 * @param iSegReg The index of the segment register to use for
9277 * this access. The base and limits are checked.
9278 * @param GCPtrMem The address of the guest memory.
9279 * @param u8Value The value to store.
9280 */
9281IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9282{
9283 /* The lazy approach for now... */
9284 uint8_t *pu8Dst;
9285 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9286 if (rc == VINF_SUCCESS)
9287 {
9288 *pu8Dst = u8Value;
9289 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9290 }
9291 return rc;
9292}
9293
9294
9295#ifdef IEM_WITH_SETJMP
9296/**
9297 * Stores a data byte, longjmp on error.
9298 *
9299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9300 * @param iSegReg The index of the segment register to use for
9301 * this access. The base and limits are checked.
9302 * @param GCPtrMem The address of the guest memory.
9303 * @param u8Value The value to store.
9304 */
9305IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9306{
9307 /* The lazy approach for now... */
9308 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9309 *pu8Dst = u8Value;
9310 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9311}
9312#endif
9313
9314
9315/**
9316 * Stores a data word.
9317 *
9318 * @returns Strict VBox status code.
9319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9320 * @param iSegReg The index of the segment register to use for
9321 * this access. The base and limits are checked.
9322 * @param GCPtrMem The address of the guest memory.
9323 * @param u16Value The value to store.
9324 */
9325IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9326{
9327 /* The lazy approach for now... */
9328 uint16_t *pu16Dst;
9329 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9330 if (rc == VINF_SUCCESS)
9331 {
9332 *pu16Dst = u16Value;
9333 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9334 }
9335 return rc;
9336}
9337
9338
9339#ifdef IEM_WITH_SETJMP
9340/**
9341 * Stores a data word, longjmp on error.
9342 *
9343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9344 * @param iSegReg The index of the segment register to use for
9345 * this access. The base and limits are checked.
9346 * @param GCPtrMem The address of the guest memory.
9347 * @param u16Value The value to store.
9348 */
9349IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9350{
9351 /* The lazy approach for now... */
9352 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9353 *pu16Dst = u16Value;
9354 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9355}
9356#endif
9357
9358
9359/**
9360 * Stores a data dword.
9361 *
9362 * @returns Strict VBox status code.
9363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9364 * @param iSegReg The index of the segment register to use for
9365 * this access. The base and limits are checked.
9366 * @param GCPtrMem The address of the guest memory.
9367 * @param u32Value The value to store.
9368 */
9369IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9370{
9371 /* The lazy approach for now... */
9372 uint32_t *pu32Dst;
9373 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9374 if (rc == VINF_SUCCESS)
9375 {
9376 *pu32Dst = u32Value;
9377 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9378 }
9379 return rc;
9380}
9381
9382
9383#ifdef IEM_WITH_SETJMP
9384/**
9385 * Stores a data dword, longjmp on error.
9386 *
9388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9389 * @param iSegReg The index of the segment register to use for
9390 * this access. The base and limits are checked.
9391 * @param GCPtrMem The address of the guest memory.
9392 * @param u32Value The value to store.
9393 */
9394IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9395{
9396 /* The lazy approach for now... */
9397 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9398 *pu32Dst = u32Value;
9399 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9400}
9401#endif
9402
9403
9404/**
9405 * Stores a data qword.
9406 *
9407 * @returns Strict VBox status code.
9408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9409 * @param iSegReg The index of the segment register to use for
9410 * this access. The base and limits are checked.
9411 * @param GCPtrMem The address of the guest memory.
9412 * @param u64Value The value to store.
9413 */
9414IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9415{
9416 /* The lazy approach for now... */
9417 uint64_t *pu64Dst;
9418 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9419 if (rc == VINF_SUCCESS)
9420 {
9421 *pu64Dst = u64Value;
9422 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9423 }
9424 return rc;
9425}
9426
9427
9428#ifdef IEM_WITH_SETJMP
9429/**
9430 * Stores a data qword, longjmp on error.
9431 *
9432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9433 * @param iSegReg The index of the segment register to use for
9434 * this access. The base and limits are checked.
9435 * @param GCPtrMem The address of the guest memory.
9436 * @param u64Value The value to store.
9437 */
9438IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9439{
9440 /* The lazy approach for now... */
9441 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9442 *pu64Dst = u64Value;
9443 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9444}
9445#endif
9446
9447
9448/**
9449 * Stores a data dqword.
9450 *
9451 * @returns Strict VBox status code.
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param iSegReg The index of the segment register to use for
9454 * this access. The base and limits are checked.
9455 * @param GCPtrMem The address of the guest memory.
9456 * @param u128Value The value to store.
9457 */
9458IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9459{
9460 /* The lazy approach for now... */
9461 uint128_t *pu128Dst;
9462 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9463 if (rc == VINF_SUCCESS)
9464 {
9465 *pu128Dst = u128Value;
9466 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9467 }
9468 return rc;
9469}
9470
9471
9472#ifdef IEM_WITH_SETJMP
9473/**
9474 * Stores a data dqword, longjmp on error.
9475 *
9476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9477 * @param iSegReg The index of the segment register to use for
9478 * this access. The base and limits are checked.
9479 * @param GCPtrMem The address of the guest memory.
9480 * @param u128Value The value to store.
9481 */
9482IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9483{
9484 /* The lazy approach for now... */
9485 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9486 *pu128Dst = u128Value;
9487 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9488}
9489#endif
9490
9491
9492/**
9493 * Stores a data dqword, SSE aligned.
9494 *
9495 * @returns Strict VBox status code.
9496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9497 * @param iSegReg The index of the segment register to use for
9498 * this access. The base and limits are checked.
9499 * @param GCPtrMem The address of the guest memory.
9500 * @param u128Value The value to store.
9501 */
9502IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9503{
9504 /* The lazy approach for now... */
9505 if ( (GCPtrMem & 15)
9506 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9507 return iemRaiseGeneralProtectionFault0(pVCpu);
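    /* Note: the MXCSR.MM check models AMD's misaligned SSE mode; when that bit is
       set, unaligned 16-byte SSE accesses do not raise \#GP(0). */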
9508
9509 uint128_t *pu128Dst;
9510 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9511 if (rc == VINF_SUCCESS)
9512 {
9513 *pu128Dst = u128Value;
9514 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9515 }
9516 return rc;
9517}
9518
9519
9520#ifdef IEM_WITH_SETJMP
9521/**
9522 * Stores a data dqword, SSE aligned, longjmp on error.
9523 *
9525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9526 * @param iSegReg The index of the segment register to use for
9527 * this access. The base and limits are checked.
9528 * @param GCPtrMem The address of the guest memory.
9529 * @param u128Value The value to store.
9530 */
9531DECL_NO_INLINE(IEM_STATIC, void)
9532iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9533{
9534 /* The lazy approach for now... */
9535 if ( (GCPtrMem & 15) == 0
9536 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9537 {
9538 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9539 *pu128Dst = u128Value;
9540 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9541 return;
9542 }
9543
9544 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9545 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9546}
9547#endif
9548
9549
9550/**
9551 * Stores a descriptor register (sgdt, sidt).
9552 *
9553 * @returns Strict VBox status code.
9554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9555 * @param cbLimit The limit.
9556 * @param GCPtrBase The base address.
9557 * @param iSegReg The index of the segment register to use for
9558 * this access. The base and limits are checked.
9559 * @param GCPtrMem The address of the guest memory.
9560 */
9561IEM_STATIC VBOXSTRICTRC
9562iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9563{
9564 /*
9565 * The SIDT and SGDT instructions actually store the data using two
9566 * independent writes. The instructions do not respond to operand-size prefixes.
9567 */
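    /* Layout of the stores below: a 16-bit limit at GCPtrMem+0, followed at
       GCPtrMem+2 by a 32-bit base in 16-bit/32-bit mode (top byte forced to 0xff
       on 286-level targets) or a 64-bit base in 64-bit mode. */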
9568 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9569 if (rcStrict == VINF_SUCCESS)
9570 {
9571 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9572 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9573 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9574 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9575 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9576 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9577 else
9578 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9579 }
9580 return rcStrict;
9581}
9582
9583
9584/**
9585 * Pushes a word onto the stack.
9586 *
9587 * @returns Strict VBox status code.
9588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9589 * @param u16Value The value to push.
9590 */
9591IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9592{
9593 /* Decrement the stack pointer. */
9594 uint64_t uNewRsp;
9595 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9596 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9597
9598 /* Write the word the lazy way. */
9599 uint16_t *pu16Dst;
9600 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9601 if (rc == VINF_SUCCESS)
9602 {
9603 *pu16Dst = u16Value;
9604 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9605 }
9606
9607 /* Commit the new RSP value unless an access handler made trouble. */
9608 if (rc == VINF_SUCCESS)
9609 pCtx->rsp = uNewRsp;
9610
9611 return rc;
9612}
9613
9614
9615/**
9616 * Pushes a dword onto the stack.
9617 *
9618 * @returns Strict VBox status code.
9619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9620 * @param u32Value The value to push.
9621 */
9622IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9623{
9624 /* Decrement the stack pointer. */
9625 uint64_t uNewRsp;
9626 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9627 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9628
9629 /* Write the dword the lazy way. */
9630 uint32_t *pu32Dst;
9631 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9632 if (rc == VINF_SUCCESS)
9633 {
9634 *pu32Dst = u32Value;
9635 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9636 }
9637
9638 /* Commit the new RSP value unless an access handler made trouble. */
9639 if (rc == VINF_SUCCESS)
9640 pCtx->rsp = uNewRsp;
9641
9642 return rc;
9643}
9644
9645
9646/**
9647 * Pushes a dword segment register value onto the stack.
9648 *
9649 * @returns Strict VBox status code.
9650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9651 * @param u32Value The value to push.
9652 */
9653IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9654{
9655 /* Decrement the stack pointer. */
9656 uint64_t uNewRsp;
9657 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9658 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9659
9660 VBOXSTRICTRC rc;
9661 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9662 {
9663 /* The recompiler writes a full dword. */
9664 uint32_t *pu32Dst;
9665 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9666 if (rc == VINF_SUCCESS)
9667 {
9668 *pu32Dst = u32Value;
9669 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9670 }
9671 }
9672 else
9673 {
9674 /* The Intel docs talk about zero-extending the selector register
9675 value. My actual Intel CPU here might be zero-extending the value
9676 but it still only writes the lower word... */
9677 /** @todo Test this on new HW, on AMD and in 64-bit mode. Also test what
9678 * happens when crossing a page boundary: is the high word checked
9679 * for write accessibility or not? Probably it is. What about segment limits?
9680 * It appears this behavior is also shared with trap error codes.
9681 *
9682 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
9683 * ancient hardware to see when it actually changed. */
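        /* Map the full dword stack slot read/write but only write the low word,
           leaving the upper two bytes of the slot untouched. */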
9684 uint16_t *pu16Dst;
9685 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9686 if (rc == VINF_SUCCESS)
9687 {
9688 *pu16Dst = (uint16_t)u32Value;
9689 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9690 }
9691 }
9692
9693 /* Commit the new RSP value unless an access handler made trouble. */
9694 if (rc == VINF_SUCCESS)
9695 pCtx->rsp = uNewRsp;
9696
9697 return rc;
9698}
9699
9700
9701/**
9702 * Pushes a qword onto the stack.
9703 *
9704 * @returns Strict VBox status code.
9705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9706 * @param u64Value The value to push.
9707 */
9708IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9709{
9710 /* Decrement the stack pointer. */
9711 uint64_t uNewRsp;
9712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9713 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9714
9715 /* Write the qword the lazy way. */
9716 uint64_t *pu64Dst;
9717 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9718 if (rc == VINF_SUCCESS)
9719 {
9720 *pu64Dst = u64Value;
9721 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9722 }
9723
9724 /* Commit the new RSP value unless an access handler made trouble. */
9725 if (rc == VINF_SUCCESS)
9726 pCtx->rsp = uNewRsp;
9727
9728 return rc;
9729}
9730
9731
9732/**
9733 * Pops a word from the stack.
9734 *
9735 * @returns Strict VBox status code.
9736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9737 * @param pu16Value Where to store the popped value.
9738 */
9739IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9740{
9741 /* Increment the stack pointer. */
9742 uint64_t uNewRsp;
9743 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9744 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9745
9746 /* Read the word the lazy way. */
9747 uint16_t const *pu16Src;
9748 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9749 if (rc == VINF_SUCCESS)
9750 {
9751 *pu16Value = *pu16Src;
9752 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9753
9754 /* Commit the new RSP value. */
9755 if (rc == VINF_SUCCESS)
9756 pCtx->rsp = uNewRsp;
9757 }
9758
9759 return rc;
9760}
9761
9762
9763/**
9764 * Pops a dword from the stack.
9765 *
9766 * @returns Strict VBox status code.
9767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9768 * @param pu32Value Where to store the popped value.
9769 */
9770IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9771{
9772 /* Increment the stack pointer. */
9773 uint64_t uNewRsp;
9774 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9775 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9776
9777 /* Read the dword the lazy way. */
9778 uint32_t const *pu32Src;
9779 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9780 if (rc == VINF_SUCCESS)
9781 {
9782 *pu32Value = *pu32Src;
9783 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9784
9785 /* Commit the new RSP value. */
9786 if (rc == VINF_SUCCESS)
9787 pCtx->rsp = uNewRsp;
9788 }
9789
9790 return rc;
9791}
9792
9793
9794/**
9795 * Pops a qword from the stack.
9796 *
9797 * @returns Strict VBox status code.
9798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9799 * @param pu64Value Where to store the popped value.
9800 */
9801IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9802{
9803 /* Increment the stack pointer. */
9804 uint64_t uNewRsp;
9805 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9806 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9807
9808 /* Read the qword the lazy way. */
9809 uint64_t const *pu64Src;
9810 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9811 if (rc == VINF_SUCCESS)
9812 {
9813 *pu64Value = *pu64Src;
9814 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9815
9816 /* Commit the new RSP value. */
9817 if (rc == VINF_SUCCESS)
9818 pCtx->rsp = uNewRsp;
9819 }
9820
9821 return rc;
9822}
9823
9824
9825/**
9826 * Pushes a word onto the stack, using a temporary stack pointer.
9827 *
9828 * @returns Strict VBox status code.
9829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9830 * @param u16Value The value to push.
9831 * @param pTmpRsp Pointer to the temporary stack pointer.
9832 */
9833IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9834{
9835 /* Decrement the stack pointer. */
9836 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9837 RTUINT64U NewRsp = *pTmpRsp;
9838 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9839
9840 /* Write the word the lazy way. */
9841 uint16_t *pu16Dst;
9842 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9843 if (rc == VINF_SUCCESS)
9844 {
9845 *pu16Dst = u16Value;
9846 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9847 }
9848
9849 /* Commit the new RSP value unless an access handler made trouble. */
9850 if (rc == VINF_SUCCESS)
9851 *pTmpRsp = NewRsp;
9852
9853 return rc;
9854}
9855
9856
9857/**
9858 * Pushes a dword onto the stack, using a temporary stack pointer.
9859 *
9860 * @returns Strict VBox status code.
9861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9862 * @param u32Value The value to push.
9863 * @param pTmpRsp Pointer to the temporary stack pointer.
9864 */
9865IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9866{
9867 /* Decrement the stack pointer. */
9868 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9869 RTUINT64U NewRsp = *pTmpRsp;
9870 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9871
9872 /* Write the dword the lazy way. */
9873 uint32_t *pu32Dst;
9874 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9875 if (rc == VINF_SUCCESS)
9876 {
9877 *pu32Dst = u32Value;
9878 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9879 }
9880
9881 /* Commit the new RSP value unless an access handler made trouble. */
9882 if (rc == VINF_SUCCESS)
9883 *pTmpRsp = NewRsp;
9884
9885 return rc;
9886}
9887
9888
9889/**
9890 * Pushes a qword onto the stack, using a temporary stack pointer.
9891 *
9892 * @returns Strict VBox status code.
9893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9894 * @param u64Value The value to push.
9895 * @param pTmpRsp Pointer to the temporary stack pointer.
9896 */
9897IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9898{
9899 /* Decrement the stack pointer. */
9900 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9901 RTUINT64U NewRsp = *pTmpRsp;
9902 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9903
9904 /* Write the qword the lazy way. */
9905 uint64_t *pu64Dst;
9906 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9907 if (rc == VINF_SUCCESS)
9908 {
9909 *pu64Dst = u64Value;
9910 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9911 }
9912
9913 /* Commit the new RSP value unless an access handler made trouble. */
9914 if (rc == VINF_SUCCESS)
9915 *pTmpRsp = NewRsp;
9916
9917 return rc;
9918}
9919
9920
9921/**
9922 * Pops a word from the stack, using a temporary stack pointer.
9923 *
9924 * @returns Strict VBox status code.
9925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9926 * @param pu16Value Where to store the popped value.
9927 * @param pTmpRsp Pointer to the temporary stack pointer.
9928 */
9929IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9930{
9931 /* Increment the stack pointer. */
9932 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9933 RTUINT64U NewRsp = *pTmpRsp;
9934 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9935
9936 /* Read the word the lazy way. */
9937 uint16_t const *pu16Src;
9938 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9939 if (rc == VINF_SUCCESS)
9940 {
9941 *pu16Value = *pu16Src;
9942 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9943
9944 /* Commit the new RSP value. */
9945 if (rc == VINF_SUCCESS)
9946 *pTmpRsp = NewRsp;
9947 }
9948
9949 return rc;
9950}
9951
9952
9953/**
9954 * Pops a dword from the stack, using a temporary stack pointer.
9955 *
9956 * @returns Strict VBox status code.
9957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9958 * @param pu32Value Where to store the popped value.
9959 * @param pTmpRsp Pointer to the temporary stack pointer.
9960 */
9961IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9962{
9963 /* Increment the stack pointer. */
9964 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9965 RTUINT64U NewRsp = *pTmpRsp;
9966 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9967
9968 /* Read the dword the lazy way. */
9969 uint32_t const *pu32Src;
9970 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9971 if (rc == VINF_SUCCESS)
9972 {
9973 *pu32Value = *pu32Src;
9974 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9975
9976 /* Commit the new RSP value. */
9977 if (rc == VINF_SUCCESS)
9978 *pTmpRsp = NewRsp;
9979 }
9980
9981 return rc;
9982}
9983
9984
9985/**
9986 * Pops a qword from the stack, using a temporary stack pointer.
9987 *
9988 * @returns Strict VBox status code.
9989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9990 * @param pu64Value Where to store the popped value.
9991 * @param pTmpRsp Pointer to the temporary stack pointer.
9992 */
9993IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9994{
9995 /* Increment the stack pointer. */
9996 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9997 RTUINT64U NewRsp = *pTmpRsp;
9998 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9999
10000 /* Read the qword the lazy way. */
10001 uint64_t const *pu64Src;
10002 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10003 if (rcStrict == VINF_SUCCESS)
10004 {
10005 *pu64Value = *pu64Src;
10006 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10007
10008 /* Commit the new RSP value. */
10009 if (rcStrict == VINF_SUCCESS)
10010 *pTmpRsp = NewRsp;
10011 }
10012
10013 return rcStrict;
10014}
10015
10016
10017/**
10018 * Begin a special stack push (used by interrupts, exceptions and such).
10019 *
10020 * This will raise \#SS or \#PF if appropriate.
10021 *
10022 * @returns Strict VBox status code.
10023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10024 * @param cbMem The number of bytes to push onto the stack.
10025 * @param ppvMem Where to return the pointer to the stack memory.
10026 * As with the other memory functions, this could be
10027 * direct access or bounce-buffered access, so
10028 * don't commit the register state until the
10029 * commit call succeeds.
10030 * @param puNewRsp Where to return the new RSP value. This must be
10031 * passed unchanged to
10032 * iemMemStackPushCommitSpecial().
10033 */
10034IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10035{
10036 Assert(cbMem < UINT8_MAX);
10037 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10038 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10039 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10040}
10041
10042
10043/**
10044 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10045 *
10046 * This will update the rSP.
10047 *
10048 * @returns Strict VBox status code.
10049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10050 * @param pvMem The pointer returned by
10051 * iemMemStackPushBeginSpecial().
10052 * @param uNewRsp The new RSP value returned by
10053 * iemMemStackPushBeginSpecial().
10054 */
10055IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10056{
10057 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10058 if (rcStrict == VINF_SUCCESS)
10059 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10060 return rcStrict;
10061}
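/*
 * Typical begin/commit usage (a rough sketch only; cbFrame and uValue are
 * placeholders, not taken from an actual caller):
 *
 *      void    *pvStackMem;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbFrame, &pvStackMem, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *(uint64_t *)pvStackMem = uValue;   // build the frame in the mapped memory
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackMem, uNewRsp);
 *      // RSP is only updated when the commit succeeds.
 */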
10062
10063
10064/**
10065 * Begin a special stack pop (used by iret, retf and such).
10066 *
10067 * This will raise \#SS or \#PF if appropriate.
10068 *
10069 * @returns Strict VBox status code.
10070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10071 * @param cbMem The number of bytes to pop from the stack.
10072 * @param ppvMem Where to return the pointer to the stack memory.
10073 * @param puNewRsp Where to return the new RSP value. This must be
10074 * assigned to CPUMCTX::rsp manually some time
10075 * after iemMemStackPopDoneSpecial() has been
10076 * called.
10077 */
10078IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10079{
10080 Assert(cbMem < UINT8_MAX);
10081 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10082 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10083 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10084}
10085
10086
10087/**
10088 * Continue a special stack pop (used by iret and retf).
10089 *
10090 * This will raise \#SS or \#PF if appropriate.
10091 *
10092 * @returns Strict VBox status code.
10093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10094 * @param cbMem The number of bytes to pop from the stack.
10095 * @param ppvMem Where to return the pointer to the stack memory.
10096 * @param puNewRsp Where to return the new RSP value. This must be
10097 * assigned to CPUMCTX::rsp manually some time
10098 * after iemMemStackPopDoneSpecial() has been
10099 * called.
10100 */
10101IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10102{
10103 Assert(cbMem < UINT8_MAX);
10104 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10105 RTUINT64U NewRsp;
10106 NewRsp.u = *puNewRsp;
10107 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10108 *puNewRsp = NewRsp.u;
10109 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10110}
10111
10112
10113/**
10114 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10115 * iemMemStackPopContinueSpecial).
10116 *
10117 * The caller will manually commit the rSP.
10118 *
10119 * @returns Strict VBox status code.
10120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10121 * @param pvMem The pointer returned by
10122 * iemMemStackPopBeginSpecial() or
10123 * iemMemStackPopContinueSpecial().
10124 */
10125IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10126{
10127 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10128}
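/*
 * Rough usage note: a pop sequence is Begin -> read the mapped bytes -> Done,
 * after which the caller assigns *puNewRsp to CPUMCTX::rsp itself once the
 * whole operation is known to succeed; iemMemStackPopContinueSpecial() lets
 * iret/retf keep pulling data using the same running RSP value.
 */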
10129
10130
10131/**
10132 * Fetches a system table byte.
10133 *
10134 * @returns Strict VBox status code.
10135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10136 * @param pbDst Where to return the byte.
10137 * @param iSegReg The index of the segment register to use for
10138 * this access. The base and limits are checked.
10139 * @param GCPtrMem The address of the guest memory.
10140 */
10141IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10142{
10143 /* The lazy approach for now... */
10144 uint8_t const *pbSrc;
10145 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10146 if (rc == VINF_SUCCESS)
10147 {
10148 *pbDst = *pbSrc;
10149 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10150 }
10151 return rc;
10152}
10153
10154
10155/**
10156 * Fetches a system table word.
10157 *
10158 * @returns Strict VBox status code.
10159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10160 * @param pu16Dst Where to return the word.
10161 * @param iSegReg The index of the segment register to use for
10162 * this access. The base and limits are checked.
10163 * @param GCPtrMem The address of the guest memory.
10164 */
10165IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10166{
10167 /* The lazy approach for now... */
10168 uint16_t const *pu16Src;
10169 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10170 if (rc == VINF_SUCCESS)
10171 {
10172 *pu16Dst = *pu16Src;
10173 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10174 }
10175 return rc;
10176}
10177
10178
10179/**
10180 * Fetches a system table dword.
10181 *
10182 * @returns Strict VBox status code.
10183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10184 * @param pu32Dst Where to return the dword.
10185 * @param iSegReg The index of the segment register to use for
10186 * this access. The base and limits are checked.
10187 * @param GCPtrMem The address of the guest memory.
10188 */
10189IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10190{
10191 /* The lazy approach for now... */
10192 uint32_t const *pu32Src;
10193 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10194 if (rc == VINF_SUCCESS)
10195 {
10196 *pu32Dst = *pu32Src;
10197 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10198 }
10199 return rc;
10200}
10201
10202
10203/**
10204 * Fetches a system table qword.
10205 *
10206 * @returns Strict VBox status code.
10207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10208 * @param pu64Dst Where to return the qword.
10209 * @param iSegReg The index of the segment register to use for
10210 * this access. The base and limits are checked.
10211 * @param GCPtrMem The address of the guest memory.
10212 */
10213IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10214{
10215 /* The lazy approach for now... */
10216 uint64_t const *pu64Src;
10217 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10218 if (rc == VINF_SUCCESS)
10219 {
10220 *pu64Dst = *pu64Src;
10221 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10222 }
10223 return rc;
10224}
10225
10226
10227/**
10228 * Fetches a descriptor table entry with a caller-specified error code.
10229 *
10230 * @returns Strict VBox status code.
10231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10232 * @param pDesc Where to return the descriptor table entry.
10233 * @param uSel The selector which table entry to fetch.
10234 * @param uXcpt The exception to raise on table lookup error.
10235 * @param uErrorCode The error code associated with the exception.
10236 */
10237IEM_STATIC VBOXSTRICTRC
10238iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10239{
10240 AssertPtr(pDesc);
10241 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10242
10243 /** @todo did the 286 require all 8 bytes to be accessible? */
10244 /*
10245 * Get the selector table base and check bounds.
10246 */
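    /* Note: uSel | X86_SEL_RPL_LDT is the offset of the last byte of the 8-byte
       descriptor entry, so comparing it against the (inclusive) table limit below
       checks that the entire entry lies within the table. */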
10247 RTGCPTR GCPtrBase;
10248 if (uSel & X86_SEL_LDT)
10249 {
10250 if ( !pCtx->ldtr.Attr.n.u1Present
10251 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10252 {
10253 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10254 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10255 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10256 uErrorCode, 0);
10257 }
10258
10259 Assert(pCtx->ldtr.Attr.n.u1Present);
10260 GCPtrBase = pCtx->ldtr.u64Base;
10261 }
10262 else
10263 {
10264 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10265 {
10266 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10267 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10268 uErrorCode, 0);
10269 }
10270 GCPtrBase = pCtx->gdtr.pGdt;
10271 }
10272
10273 /*
10274 * Read the legacy descriptor and maybe the long mode extensions if
10275 * required.
10276 */
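    /* 286-level targets read the entry as three 16-bit words; everything newer
       reads the whole 8 bytes at once.  The long mode path further down fetches
       the upper half of the 16-byte system descriptor at
       (uSel | X86_SEL_RPL_LDT) + 1, which equals (uSel & X86_SEL_MASK) + 8. */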
10277 VBOXSTRICTRC rcStrict;
10278 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10279 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10280 else
10281 {
10282 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10283 if (rcStrict == VINF_SUCCESS)
10284 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10285 if (rcStrict == VINF_SUCCESS)
10286 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10287 if (rcStrict == VINF_SUCCESS)
10288 pDesc->Legacy.au16[3] = 0;
10289 else
10290 return rcStrict;
10291 }
10292
10293 if (rcStrict == VINF_SUCCESS)
10294 {
10295 if ( !IEM_IS_LONG_MODE(pVCpu)
10296 || pDesc->Legacy.Gen.u1DescType)
10297 pDesc->Long.au64[1] = 0;
10298 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10299 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10300 else
10301 {
10302 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10303 /** @todo is this the right exception? */
10304 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10305 }
10306 }
10307 return rcStrict;
10308}
10309
10310
10311/**
10312 * Fetches a descriptor table entry.
10313 *
10314 * @returns Strict VBox status code.
10315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10316 * @param pDesc Where to return the descriptor table entry.
10317 * @param uSel The selector which table entry to fetch.
10318 * @param uXcpt The exception to raise on table lookup error.
10319 */
10320IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10321{
10322 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10323}
10324
10325
10326/**
10327 * Fakes a long mode stack selector for SS = 0.
10328 *
10329 * @param pDescSs Where to return the fake stack descriptor.
10330 * @param uDpl The DPL we want.
10331 */
10332IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10333{
10334 pDescSs->Long.au64[0] = 0;
10335 pDescSs->Long.au64[1] = 0;
10336 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10337 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10338 pDescSs->Long.Gen.u2Dpl = uDpl;
10339 pDescSs->Long.Gen.u1Present = 1;
10340 pDescSs->Long.Gen.u1Long = 1;
10341}
10342
10343
10344/**
10345 * Marks the selector descriptor as accessed (only non-system descriptors).
10346 *
10347 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10348 * will therefore skip the limit checks.
10349 *
10350 * @returns Strict VBox status code.
10351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10352 * @param uSel The selector.
10353 */
10354IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10355{
10356 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10357
10358 /*
10359 * Get the selector table base and calculate the entry address.
10360 */
10361 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10362 ? pCtx->ldtr.u64Base
10363 : pCtx->gdtr.pGdt;
10364 GCPtr += uSel & X86_SEL_MASK;
10365
10366 /*
10367 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10368 * ugly stuff to avoid this. This will make sure it's an atomic access
10369 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10370 */
10371 VBOXSTRICTRC rcStrict;
10372 uint32_t volatile *pu32;
10373 if ((GCPtr & 3) == 0)
10374 {
10375 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
10376 GCPtr += 2 + 2;
10377 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10378 if (rcStrict != VINF_SUCCESS)
10379 return rcStrict;
10380 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10381 }
10382 else
10383 {
10384 /* The misaligned GDT/LDT case, map the whole thing. */
10385 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10386 if (rcStrict != VINF_SUCCESS)
10387 return rcStrict;
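        /* Pick a 4-byte aligned base at or after pu32 and adjust the bit index so
           that the bit being set is still bit 40 of the descriptor, i.e. the
           X86_SEL_TYPE_ACCESSED bit. */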
10388 switch ((uintptr_t)pu32 & 3)
10389 {
10390 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10391 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10392 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10393 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10394 }
10395 }
10396
10397 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10398}
10399
10400/** @} */
10401
10402
10403/*
10404 * Include the C/C++ implementation of the instructions.
10405 */
10406#include "IEMAllCImpl.cpp.h"
10407
10408
10409
10410/** @name "Microcode" macros.
10411 *
10412 * The idea is that we should be able to use the same code to interpret
10413 * instructions as well as to feed a recompiler. Thus this obfuscation.
10414 *
10415 * @{
10416 */
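/*
 * A sketch of how an instruction body is typically written with these macros
 * (hypothetical register-to-register move, not lifted from the opcode tables):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * When interpreting, these expand to the plain C below; a recompiler could
 * expand them into something else entirely.
 */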
10417#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10418#define IEM_MC_END() }
10419#define IEM_MC_PAUSE() do {} while (0)
10420#define IEM_MC_CONTINUE() do {} while (0)
10421
10422/** Internal macro. */
10423#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10424 do \
10425 { \
10426 VBOXSTRICTRC rcStrict2 = a_Expr; \
10427 if (rcStrict2 != VINF_SUCCESS) \
10428 return rcStrict2; \
10429 } while (0)
10430
10431
10432#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10433#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10434#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10435#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10436#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10437#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10438#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10439#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10440#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10441 do { \
10442 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10443 return iemRaiseDeviceNotAvailable(pVCpu); \
10444 } while (0)
10445#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10446 do { \
10447 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10448 return iemRaiseDeviceNotAvailable(pVCpu); \
10449 } while (0)
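/* Note: unlike the EM/TS test above, (F)WAIT only raises \#NM when both CR0.MP
   and CR0.TS are set; CR0.EM does not matter for WAIT. */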
10450#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10451 do { \
10452 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10453 return iemRaiseMathFault(pVCpu); \
10454 } while (0)
10455#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10456 do { \
10457 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10458 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10459 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10460 return iemRaiseUndefinedOpcode(pVCpu); \
10461 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10462 return iemRaiseDeviceNotAvailable(pVCpu); \
10463 } while (0)
10464#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10465 do { \
10466 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10467 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10468 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10469 return iemRaiseUndefinedOpcode(pVCpu); \
10470 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10471 return iemRaiseDeviceNotAvailable(pVCpu); \
10472 } while (0)
10473#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10474 do { \
10475 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10476 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10477 return iemRaiseUndefinedOpcode(pVCpu); \
10478 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10479 return iemRaiseDeviceNotAvailable(pVCpu); \
10480 } while (0)
10481#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10482 do { \
10483 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10484 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10485 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10486 return iemRaiseUndefinedOpcode(pVCpu); \
10487 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10488 return iemRaiseDeviceNotAvailable(pVCpu); \
10489 } while (0)
10490#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10491 do { \
10492 if (pVCpu->iem.s.uCpl != 0) \
10493 return iemRaiseGeneralProtectionFault0(pVCpu); \
10494 } while (0)
10495#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10496 do { \
10497 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10498 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10499 } while (0)
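/* Example: with a_cbAlign = 8 the mask is 7, so any of the low three address
   bits being set means the effective address is misaligned and \#GP(0) is raised. */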
10500
10501
10502#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10503#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10504#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10505#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10506#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10507#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10508#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10509 uint32_t a_Name; \
10510 uint32_t *a_pName = &a_Name
10511#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10512 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10513
10514#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10515#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10516
10517#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10518#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10519#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10520#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10521#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10522#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10523#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10524#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10525#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10526#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10527#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10528#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10529#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10530#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10531#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10532#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10533#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10534#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10535#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10536#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10537#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10538#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10539#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10540#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10541#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10542#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10543#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10544#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10545#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10546/** @note Not for IOPL or IF testing or modification. */
10547#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10548#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10549#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10550#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10551
10552#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10553#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10554#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10555#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10556#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10557#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10558#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10559#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10560#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10561#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10562#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10563 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10564
10565#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10566#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10567/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10568 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10569#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10570#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10571/** @note Not for IOPL or IF testing or modification. */
10572#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10573
10574#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10575#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10576#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10577 do { \
10578 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10579 *pu32Reg += (a_u32Value); \
10580 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10581 } while (0)
10582#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10583
10584#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10585#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10586#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10587 do { \
10588 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10589 *pu32Reg -= (a_u32Value); \
10590 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10591 } while (0)
10592#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10593#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10594
10595#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10596#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10597#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10598#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10599#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10600#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10601#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10602
10603#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10604#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10605#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10606#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10607
10608#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10609#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10610#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10611
10612#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10613#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10614#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10615
10616#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10617#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10618#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10619
10620#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10621#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10622#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10623
10624#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10625
10626#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10627
10628#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10629#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10630#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10631 do { \
10632 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10633 *pu32Reg &= (a_u32Value); \
10634 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10635 } while (0)
10636#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10637
10638#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10639#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10640#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10641 do { \
10642 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10643 *pu32Reg |= (a_u32Value); \
10644 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
10645 } while (0)
10646#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10647
10648
10649/** @note Not for IOPL or IF modification. */
10650#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10651/** @note Not for IOPL or IF modification. */
10652#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10653/** @note Not for IOPL or IF modification. */
10654#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10655
10656#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10657
10658
10659#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10660 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10661#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10662 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10663#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10664 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10665#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10666 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10667#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10668 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10669#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10670 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10671#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10672 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10673
10674#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10675 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10676#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10677 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10678#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10679 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10680#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10681 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10682#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10683 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10684#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10685 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10686 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10687 } while (0)
10688#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10689 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10690 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10691 } while (0)
10692#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10693 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10694#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10695 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10696#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10697 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10698#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10699 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10700 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10701
10702#ifndef IEM_WITH_SETJMP
10703# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10705# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10707# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10709#else
10710# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10711 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10712# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10713 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10714# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10715 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10716#endif
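/* Note: in the non-setjmp configuration each fetch bails out of the enclosing
   function via IEM_MC_RETURN_ON_FAILURE(); with IEM_WITH_SETJMP the Jmp worker
   longjmps on failure, so the macro reduces to a plain assignment. */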
10717
10718#ifndef IEM_WITH_SETJMP
10719# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10721# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10723# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10725#else
10726# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10727 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10728# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10729 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10730# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10731 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10732#endif
10733
10734#ifndef IEM_WITH_SETJMP
10735# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10737# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10739# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10741#else
10742# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10743 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10744# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10745 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10746# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10747 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10748#endif
10749
10750#ifdef SOME_UNUSED_FUNCTION
10751# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10753#endif
10754
10755#ifndef IEM_WITH_SETJMP
10756# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10758# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10760# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10762# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10764#else
10765# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10766 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10767# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10768 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10769# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10770 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10771# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10772 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10773#endif
10774
10775#ifndef IEM_WITH_SETJMP
10776# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10778# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10779 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10780# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10781 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10782#else
10783# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10784 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10785# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10786 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10787# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10788 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10789#endif
10790
10791#ifndef IEM_WITH_SETJMP
10792# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10793 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10794# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10795 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10796#else
10797# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10798 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10799# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10800 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10801#endif
10802
10803
10804
10805#ifndef IEM_WITH_SETJMP
10806# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10807 do { \
10808 uint8_t u8Tmp; \
10809 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10810 (a_u16Dst) = u8Tmp; \
10811 } while (0)
10812# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10813 do { \
10814 uint8_t u8Tmp; \
10815 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10816 (a_u32Dst) = u8Tmp; \
10817 } while (0)
10818# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10819 do { \
10820 uint8_t u8Tmp; \
10821 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10822 (a_u64Dst) = u8Tmp; \
10823 } while (0)
10824# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10825 do { \
10826 uint16_t u16Tmp; \
10827 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10828 (a_u32Dst) = u16Tmp; \
10829 } while (0)
10830# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10831 do { \
10832 uint16_t u16Tmp; \
10833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10834 (a_u64Dst) = u16Tmp; \
10835 } while (0)
10836# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10837 do { \
10838 uint32_t u32Tmp; \
10839 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10840 (a_u64Dst) = u32Tmp; \
10841 } while (0)
10842#else /* IEM_WITH_SETJMP */
10843# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10844 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10845# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10846 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10847# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10848 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10849# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10850 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10851# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10852 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10853# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10854 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10855#endif /* IEM_WITH_SETJMP */
10856
10857#ifndef IEM_WITH_SETJMP
10858# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10859 do { \
10860 uint8_t u8Tmp; \
10861 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10862 (a_u16Dst) = (int8_t)u8Tmp; \
10863 } while (0)
10864# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10865 do { \
10866 uint8_t u8Tmp; \
10867 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10868 (a_u32Dst) = (int8_t)u8Tmp; \
10869 } while (0)
10870# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10871 do { \
10872 uint8_t u8Tmp; \
10873 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10874 (a_u64Dst) = (int8_t)u8Tmp; \
10875 } while (0)
10876# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10877 do { \
10878 uint16_t u16Tmp; \
10879 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10880 (a_u32Dst) = (int16_t)u16Tmp; \
10881 } while (0)
10882# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10883 do { \
10884 uint16_t u16Tmp; \
10885 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10886 (a_u64Dst) = (int16_t)u16Tmp; \
10887 } while (0)
10888# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10889 do { \
10890 uint32_t u32Tmp; \
10891 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10892 (a_u64Dst) = (int32_t)u32Tmp; \
10893 } while (0)
10894#else /* IEM_WITH_SETJMP */
10895# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10896 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10897# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10898 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10899# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10900 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10901# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10902 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10903# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10904 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10905# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10906 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10907#endif /* IEM_WITH_SETJMP */
10908
10909#ifndef IEM_WITH_SETJMP
10910# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10911 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10912# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10913 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10914# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10915 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10916# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10917 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10918#else
10919# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10920 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10921# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10922 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10923# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10924 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10925# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10926 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10927#endif
10928
10929#ifndef IEM_WITH_SETJMP
10930# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10931 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10932# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10933 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10934# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10935 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10936# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10937 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10938#else
10939# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10940 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10941# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10942 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10943# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10944 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10945# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10946 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10947#endif
10948
10949#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10950#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10951#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10952#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10953#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10954#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10955#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10956 do { \
10957 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10958 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10959 } while (0)
10960
10961#ifndef IEM_WITH_SETJMP
10962# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10963 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10964# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10965 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10966#else
10967# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10968 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10969# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10970 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10971#endif
10972
10973
10974#define IEM_MC_PUSH_U16(a_u16Value) \
10975 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10976#define IEM_MC_PUSH_U32(a_u32Value) \
10977 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10978#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10979 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10980#define IEM_MC_PUSH_U64(a_u64Value) \
10981 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10982
10983#define IEM_MC_POP_U16(a_pu16Value) \
10984 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10985#define IEM_MC_POP_U32(a_pu32Value) \
10986 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10987#define IEM_MC_POP_U64(a_pu64Value) \
10988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10989
10990/** Maps guest memory for direct or bounce buffered access.
10991 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10992 * @remarks May return.
10993 */
10994#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10995 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10996
10997/** Maps guest memory for direct or bounce buffered access.
10998 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10999 * @remarks May return.
11000 */
11001#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11002 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11003
11004/** Commits the memory and unmaps the guest memory.
11005 * @remarks May return.
11006 */
11007#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11008 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11009
11010/** Commits the memory and unmaps the guest memory unless the FPU status word
11011 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11012 * would prevent the store.
11013 *
11014 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11015 * store, while \#P will not.
11016 *
11017 * @remarks May in theory return - for now.
11018 */
11019#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11020 do { \
11021 if ( !(a_u16FSW & X86_FSW_ES) \
11022 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11023 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11024 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11025 } while (0)
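/* Illustrative usage sketch (not part of this file): a typical read-modify-write
 * memory operand maps the guest memory, lets the assembly worker operate on the
 * mapping, and then commits it.  The worker and variable names below are
 * assumptions following the conventions used by the instruction decoders:
 *
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_CALL_VOID_AIMPL_3(pfnAImpl, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *
 * FPU stores use IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE() instead, so that a
 * pending unmasked #IE/#OE/#UE (with FSW.ES set) suppresses the commit while
 * masked or precision-only exceptions still let the value reach memory.
 */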
11026
11027/** Calculate effective address from R/M. */
11028#ifndef IEM_WITH_SETJMP
11029# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11030 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11031#else
11032# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11033 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11034#endif
11035
11036#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11037#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11038#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11039#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11040#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11041#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11042#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11043
11044/**
11045 * Defers the rest of the instruction emulation to a C implementation routine
11046 * and returns, only taking the standard parameters.
11047 *
11048 * @param a_pfnCImpl The pointer to the C routine.
11049 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11050 */
11051#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11052
11053/**
11054 * Defers the rest of instruction emulation to a C implementation routine and
11055 * returns, taking one argument in addition to the standard ones.
11056 *
11057 * @param a_pfnCImpl The pointer to the C routine.
11058 * @param a0 The argument.
11059 */
11060#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11061
11062/**
11063 * Defers the rest of the instruction emulation to a C implementation routine
11064 * and returns, taking two arguments in addition to the standard ones.
11065 *
11066 * @param a_pfnCImpl The pointer to the C routine.
11067 * @param a0 The first extra argument.
11068 * @param a1 The second extra argument.
11069 */
11070#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11071
11072/**
11073 * Defers the rest of the instruction emulation to a C implementation routine
11074 * and returns, taking three arguments in addition to the standard ones.
11075 *
11076 * @param a_pfnCImpl The pointer to the C routine.
11077 * @param a0 The first extra argument.
11078 * @param a1 The second extra argument.
11079 * @param a2 The third extra argument.
11080 */
11081#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11082
11083/**
11084 * Defers the rest of the instruction emulation to a C implementation routine
11085 * and returns, taking four arguments in addition to the standard ones.
11086 *
11087 * @param a_pfnCImpl The pointer to the C routine.
11088 * @param a0 The first extra argument.
11089 * @param a1 The second extra argument.
11090 * @param a2 The third extra argument.
11091 * @param a3 The fourth extra argument.
11092 */
11093#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11094
11095/**
11096 * Defers the rest of the instruction emulation to a C implementation routine
11097 * and returns, taking five arguments in addition to the standard ones.
11098 *
11099 * @param a_pfnCImpl The pointer to the C routine.
11100 * @param a0 The first extra argument.
11101 * @param a1 The second extra argument.
11102 * @param a2 The third extra argument.
11103 * @param a3 The fourth extra argument.
11104 * @param a4 The fifth extra argument.
11105 */
11106#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
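/* Illustrative usage sketch (not part of this file): a decoder body typically
 * ends by handing the remaining work to a C implementation routine.  The
 * routine name iemCImpl_SomeInsn is hypothetical; real ones are defined in
 * IEMAllCImpl.cpp.h.
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(uint8_t, iEffSeg,     0);
 *     IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
 *     IEM_MC_CALL_CIMPL_2(iemCImpl_SomeInsn, iEffSeg, GCPtrEffSrc);
 *     IEM_MC_END();
 */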
11107
11108/**
11109 * Defers the entire instruction emulation to a C implementation routine and
11110 * returns, only taking the standard parameters.
11111 *
11112 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11113 *
11114 * @param a_pfnCImpl The pointer to the C routine.
11115 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11116 */
11117#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11118
11119/**
11120 * Defers the entire instruction emulation to a C implementation routine and
11121 * returns, taking one argument in addition to the standard ones.
11122 *
11123 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11124 *
11125 * @param a_pfnCImpl The pointer to the C routine.
11126 * @param a0 The argument.
11127 */
11128#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11129
11130/**
11131 * Defers the entire instruction emulation to a C implementation routine and
11132 * returns, taking two arguments in addition to the standard ones.
11133 *
11134 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11135 *
11136 * @param a_pfnCImpl The pointer to the C routine.
11137 * @param a0 The first extra argument.
11138 * @param a1 The second extra argument.
11139 */
11140#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11141
11142/**
11143 * Defers the entire instruction emulation to a C implementation routine and
11144 * returns, taking three arguments in addition to the standard ones.
11145 *
11146 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11147 *
11148 * @param a_pfnCImpl The pointer to the C routine.
11149 * @param a0 The first extra argument.
11150 * @param a1 The second extra argument.
11151 * @param a2 The third extra argument.
11152 */
11153#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
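/* Illustrative usage sketch (not part of this file): instructions without any
 * MC-decoded operands skip the IEM_MC block entirely and defer right away.
 * The opcode handler and C implementation names are hypothetical:
 *
 *     FNIEMOP_DEF(iemOp_SomeInsn)
 *     {
 *         IEMOP_MNEMONIC(someinsn, "someinsn");
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeInsn);
 *     }
 */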
11154
11155/**
11156 * Calls a FPU assembly implementation taking one visible argument.
11157 *
11158 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11159 * @param a0 The first extra argument.
11160 */
11161#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11162 do { \
11163 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11164 } while (0)
11165
11166/**
11167 * Calls a FPU assembly implementation taking two visible arguments.
11168 *
11169 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11170 * @param a0 The first extra argument.
11171 * @param a1 The second extra argument.
11172 */
11173#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11174 do { \
11175 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11176 } while (0)
11177
11178/**
11179 * Calls a FPU assembly implementation taking three visible arguments.
11180 *
11181 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11182 * @param a0 The first extra argument.
11183 * @param a1 The second extra argument.
11184 * @param a2 The third extra argument.
11185 */
11186#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11187 do { \
11188 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11189 } while (0)
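/* Illustrative usage sketch (not part of this file): a typical two-operand FPU
 * instruction references the stack registers, calls the assembly worker with an
 * IEMFPURESULT local, and then stores the result.  Locals/arguments such as
 * FpuRes, pFpuRes and pr80Value1/2 are assumed to have been declared with
 * IEM_MC_LOCAL / IEM_MC_ARG_LOCAL_REF / IEM_MC_ARG earlier in the block:
 *
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *         IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 */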
11190
11191#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11192 do { \
11193 (a_FpuData).FSW = (a_FSW); \
11194 (a_FpuData).r80Result = *(a_pr80Value); \
11195 } while (0)
11196
11197/** Pushes FPU result onto the stack. */
11198#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11199 iemFpuPushResult(pVCpu, &a_FpuData)
11200/** Pushes FPU result onto the stack and sets the FPUDP. */
11201#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11202 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11203
11204/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11205#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11206 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11207
11208/** Stores FPU result in a stack register. */
11209#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11210 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11211/** Stores FPU result in a stack register and pops the stack. */
11212#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11213 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11214/** Stores FPU result in a stack register and sets the FPUDP. */
11215#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11216 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11217/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11218 * stack. */
11219#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11220 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11221
11222/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11223#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11224 iemFpuUpdateOpcodeAndIp(pVCpu)
11225/** Free a stack register (for FFREE and FFREEP). */
11226#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11227 iemFpuStackFree(pVCpu, a_iStReg)
11228/** Increment the FPU stack pointer. */
11229#define IEM_MC_FPU_STACK_INC_TOP() \
11230 iemFpuStackIncTop(pVCpu)
11231/** Decrement the FPU stack pointer. */
11232#define IEM_MC_FPU_STACK_DEC_TOP() \
11233 iemFpuStackDecTop(pVCpu)
11234
11235/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11236#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11237 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11238/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11239#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11240 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11241/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11242#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11243 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11244/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11245#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11246 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11247/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11248 * stack. */
11249#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11250 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11251/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11252#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11253 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11254
11255/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11256#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11257 iemFpuStackUnderflow(pVCpu, a_iStDst)
11258/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11259 * stack. */
11260#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11261 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11262/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11263 * FPUDS. */
11264#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11265 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11266/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11267 * FPUDS. Pops stack. */
11268#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11269 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11270/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11271 * stack twice. */
11272#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11273 iemFpuStackUnderflowThenPopPop(pVCpu)
11274/** Raises a FPU stack underflow exception for an instruction pushing a result
11275 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11276#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11277 iemFpuStackPushUnderflow(pVCpu)
11278/** Raises a FPU stack underflow exception for an instruction pushing a result
11279 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11280#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11281 iemFpuStackPushUnderflowTwo(pVCpu)
11282
11283/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11284 * FPUIP, FPUCS and FOP. */
11285#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11286 iemFpuStackPushOverflow(pVCpu)
11287/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11288 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11289#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11290 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11291/** Prepares for using the FPU state.
11292 * Ensures that we can use the host FPU in the current context (RC+R0).
11293 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11294#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11295/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
11296#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11297/** Actualizes the guest FPU state so it can be accessed and modified. */
11298#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11299
11300/** Prepares for using the SSE state.
11301 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11302 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11303#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11304/** Actualizes the guest XMM0..15 register state for read-only access. */
11305#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11306/** Actualizes the guest XMM0..15 register state for read-write access. */
11307#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11308
11309/**
11310 * Calls a MMX assembly implementation taking two visible arguments.
11311 *
11312 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11313 * @param a0 The first extra argument.
11314 * @param a1 The second extra argument.
11315 */
11316#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11317 do { \
11318 IEM_MC_PREPARE_FPU_USAGE(); \
11319 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11320 } while (0)
11321
11322/**
11323 * Calls a MMX assembly implementation taking three visible arguments.
11324 *
11325 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11326 * @param a0 The first extra argument.
11327 * @param a1 The second extra argument.
11328 * @param a2 The third extra argument.
11329 */
11330#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11331 do { \
11332 IEM_MC_PREPARE_FPU_USAGE(); \
11333 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11334 } while (0)
11335
11336
11337/**
11338 * Calls a SSE assembly implementation taking two visible arguments.
11339 *
11340 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11341 * @param a0 The first extra argument.
11342 * @param a1 The second extra argument.
11343 */
11344#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11345 do { \
11346 IEM_MC_PREPARE_SSE_USAGE(); \
11347 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11348 } while (0)
11349
11350/**
11351 * Calls a SSE assembly implementation taking three visible arguments.
11352 *
11353 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11354 * @param a0 The first extra argument.
11355 * @param a1 The second extra argument.
11356 * @param a2 The third extra argument.
11357 */
11358#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11359 do { \
11360 IEM_MC_PREPARE_SSE_USAGE(); \
11361 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11362 } while (0)
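/* Illustrative usage sketch (not part of this file): the MMX and SSE call
 * macros are used the same way; for a register-to-register SSE operation the
 * worker receives references to the XMM registers involved.  The register
 * index expressions are simplified to iXRegDst/iXRegSrc here, and pfnAImpl
 * stands in for the real worker:
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(uint128_t *,       pDst, 0);
 *     IEM_MC_ARG(uint128_t const *, pSrc, 1);
 *     IEM_MC_PREPARE_SSE_USAGE();
 *     IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *     IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *     IEM_MC_CALL_SSE_AIMPL_2(pfnAImpl, pDst, pSrc);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */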
11363
11364/** @note Not for IOPL or IF testing. */
11365#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11366/** @note Not for IOPL or IF testing. */
11367#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11368/** @note Not for IOPL or IF testing. */
11369#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11370/** @note Not for IOPL or IF testing. */
11371#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11372/** @note Not for IOPL or IF testing. */
11373#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11374 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11375 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11376/** @note Not for IOPL or IF testing. */
11377#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11378 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11379 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11380/** @note Not for IOPL or IF testing. */
11381#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11382 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11383 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11384 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11385/** @note Not for IOPL or IF testing. */
11386#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11387 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11388 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11389 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11390#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11391#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11392#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11393/** @note Not for IOPL or IF testing. */
11394#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11395 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11396 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11397/** @note Not for IOPL or IF testing. */
11398#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11399 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11400 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11401/** @note Not for IOPL or IF testing. */
11402#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11403 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11404 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11405/** @note Not for IOPL or IF testing. */
11406#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11407 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11408 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11409/** @note Not for IOPL or IF testing. */
11410#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11411 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11412 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11413/** @note Not for IOPL or IF testing. */
11414#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11415 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11416 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11417#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11418#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11419
11420#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11421 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11422#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11423 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11424#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11425 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11426#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11427 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11428#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11429 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11430#define IEM_MC_IF_FCW_IM() \
11431 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11432
11433#define IEM_MC_ELSE() } else {
11434#define IEM_MC_ENDIF() } do {} while (0)
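/* Illustrative usage sketch (not part of this file): the IEM_MC_IF/ELSE/ENDIF
 * pseudo-statements expand to ordinary C if/else blocks, e.g. for a CMOVcc
 * style instruction (register indexes simplified to iGRegSrc/iGRegDst):
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_FETCH_GREG_U32(u32Tmp, iGRegSrc);
 *         IEM_MC_STORE_GREG_U32(iGRegDst, u32Tmp);
 *     IEM_MC_ELSE()
 *         IEM_MC_CLEAR_HIGH_GREG_U64(iGRegDst);
 *     IEM_MC_ENDIF();
 */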
11435
11436/** @} */
11437
11438
11439/** @name Opcode Debug Helpers.
11440 * @{
11441 */
11442#ifdef VBOX_WITH_STATISTICS
11443# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11444#else
11445# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11446#endif
11447
11448#ifdef DEBUG
11449# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11450 do { \
11451 IEMOP_INC_STATS(a_Stats); \
11452 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11453 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11454 } while (0)
11455#else
11456# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11457#endif
11458
11459/** @} */
11460
11461
11462/** @name Opcode Helpers.
11463 * @{
11464 */
11465
11466#ifdef IN_RING3
11467# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11468 do { \
11469 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11470 else \
11471 { \
11472 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11473 return IEMOP_RAISE_INVALID_OPCODE(); \
11474 } \
11475 } while (0)
11476#else
11477# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11478 do { \
11479 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11480 else return IEMOP_RAISE_INVALID_OPCODE(); \
11481 } while (0)
11482#endif
11483
11484/** The instruction requires a 186 or later. */
11485#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11486# define IEMOP_HLP_MIN_186() do { } while (0)
11487#else
11488# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11489#endif
11490
11491/** The instruction requires a 286 or later. */
11492#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11493# define IEMOP_HLP_MIN_286() do { } while (0)
11494#else
11495# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11496#endif
11497
11498/** The instruction requires a 386 or later. */
11499#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11500# define IEMOP_HLP_MIN_386() do { } while (0)
11501#else
11502# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11503#endif
11504
11505/** The instruction requires a 386 or later if the given expression is true. */
11506#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11507# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11508#else
11509# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11510#endif
11511
11512/** The instruction requires a 486 or later. */
11513#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11514# define IEMOP_HLP_MIN_486() do { } while (0)
11515#else
11516# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11517#endif
11518
11519/** The instruction requires a Pentium (586) or later. */
11520#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11521# define IEMOP_HLP_MIN_586() do { } while (0)
11522#else
11523# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11524#endif
11525
11526/** The instruction requires a PentiumPro (686) or later. */
11527#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11528# define IEMOP_HLP_MIN_686() do { } while (0)
11529#else
11530# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11531#endif
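/* Illustrative usage sketch (not part of this file): an opcode handler for an
 * instruction introduced with the 80186 simply starts with
 *
 *     IEMOP_HLP_MIN_186();
 *
 * which compiles to nothing when the compile-time target CPU is new enough,
 * and otherwise checks the configured target CPU at runtime and raises #UD
 * (with an optional debugger stop in ring-3 builds) when it is too old.
 */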
11532
11533
11534/** The instruction raises an \#UD in real and V8086 mode. */
11535#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11536 do \
11537 { \
11538 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11539 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11540 } while (0)
11541
11542/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11543 * 64-bit mode. */
11544#define IEMOP_HLP_NO_64BIT() \
11545 do \
11546 { \
11547 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11548 return IEMOP_RAISE_INVALID_OPCODE(); \
11549 } while (0)
11550
11551/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11552 * 64-bit mode. */
11553#define IEMOP_HLP_ONLY_64BIT() \
11554 do \
11555 { \
11556 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11557 return IEMOP_RAISE_INVALID_OPCODE(); \
11558 } while (0)
11559
11560/** The instruction defaults to 64-bit operand size in 64-bit mode. */
11561#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11562 do \
11563 { \
11564 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11565 iemRecalEffOpSize64Default(pVCpu); \
11566 } while (0)
11567
11568/** The instruction has 64-bit operand size in 64-bit mode. */
11569#define IEMOP_HLP_64BIT_OP_SIZE() \
11570 do \
11571 { \
11572 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11573 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11574 } while (0)
11575
11576/** Only a REX prefix immediately preceding the first opcode byte takes
11577 * effect. This macro helps ensure this and logs bad guest code. */
11578#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11579 do \
11580 { \
11581 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11582 { \
11583 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11584 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11585 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11586 pVCpu->iem.s.uRexB = 0; \
11587 pVCpu->iem.s.uRexIndex = 0; \
11588 pVCpu->iem.s.uRexReg = 0; \
11589 iemRecalEffOpSize(pVCpu); \
11590 } \
11591 } while (0)
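/* For example (illustrative, not from this file): in the byte sequence
 * 41 66 89 C8 the REX.B prefix (41h) is followed by an operand-size prefix, so
 * it no longer immediately precedes the opcode and must be discarded - which is
 * what the macro above does when the subsequent prefix byte is decoded.
 */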
11592
11593/**
11594 * Done decoding.
11595 */
11596#define IEMOP_HLP_DONE_DECODING() \
11597 do \
11598 { \
11599 /*nothing for now, maybe later... */ \
11600 } while (0)
11601
11602/**
11603 * Done decoding, raise \#UD exception if lock prefix present.
11604 */
11605#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11606 do \
11607 { \
11608 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11609 { /* likely */ } \
11610 else \
11611 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11612 } while (0)
11613#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11614 do \
11615 { \
11616 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11617 { /* likely */ } \
11618 else \
11619 { \
11620 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11621 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11622 } \
11623 } while (0)
11624#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11625 do \
11626 { \
11627 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11628 { /* likely */ } \
11629 else \
11630 { \
11631 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11632 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11633 } \
11634 } while (0)
11635
11636/**
11637 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11638 * are present.
11639 */
11640#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11641 do \
11642 { \
11643 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11644 { /* likely */ } \
11645 else \
11646 return IEMOP_RAISE_INVALID_OPCODE(); \
11647 } while (0)
11648
11649
11650/**
11651 * Calculates the effective address of a ModR/M memory operand.
11652 *
11653 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11654 *
11655 * @return Strict VBox status code.
11656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11657 * @param bRm The ModRM byte.
11658 * @param cbImm The size of any immediate following the
11659 * effective address opcode bytes. Important for
11660 * RIP relative addressing.
11661 * @param pGCPtrEff Where to return the effective address.
11662 */
11663IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11664{
11665 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11666 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11667# define SET_SS_DEF() \
11668 do \
11669 { \
11670 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11671 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11672 } while (0)
11673
11674 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11675 {
11676/** @todo Check the effective address size crap! */
11677 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11678 {
11679 uint16_t u16EffAddr;
11680
11681 /* Handle the disp16 form with no registers first. */
11682 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11683 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11684 else
11685 {
11686 /* Get the displacement. */
11687 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11688 {
11689 case 0: u16EffAddr = 0; break;
11690 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11691 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11692 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11693 }
11694
11695 /* Add the base and index registers to the disp. */
11696 switch (bRm & X86_MODRM_RM_MASK)
11697 {
11698 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11699 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11700 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11701 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11702 case 4: u16EffAddr += pCtx->si; break;
11703 case 5: u16EffAddr += pCtx->di; break;
11704 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11705 case 7: u16EffAddr += pCtx->bx; break;
11706 }
11707 }
11708
11709 *pGCPtrEff = u16EffAddr;
11710 }
11711 else
11712 {
11713 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11714 uint32_t u32EffAddr;
11715
11716 /* Handle the disp32 form with no registers first. */
11717 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11718 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11719 else
11720 {
11721 /* Get the register (or SIB) value. */
11722 switch ((bRm & X86_MODRM_RM_MASK))
11723 {
11724 case 0: u32EffAddr = pCtx->eax; break;
11725 case 1: u32EffAddr = pCtx->ecx; break;
11726 case 2: u32EffAddr = pCtx->edx; break;
11727 case 3: u32EffAddr = pCtx->ebx; break;
11728 case 4: /* SIB */
11729 {
11730 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11731
11732 /* Get the index and scale it. */
11733 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11734 {
11735 case 0: u32EffAddr = pCtx->eax; break;
11736 case 1: u32EffAddr = pCtx->ecx; break;
11737 case 2: u32EffAddr = pCtx->edx; break;
11738 case 3: u32EffAddr = pCtx->ebx; break;
11739 case 4: u32EffAddr = 0; /*none */ break;
11740 case 5: u32EffAddr = pCtx->ebp; break;
11741 case 6: u32EffAddr = pCtx->esi; break;
11742 case 7: u32EffAddr = pCtx->edi; break;
11743 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11744 }
11745 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11746
11747 /* add base */
11748 switch (bSib & X86_SIB_BASE_MASK)
11749 {
11750 case 0: u32EffAddr += pCtx->eax; break;
11751 case 1: u32EffAddr += pCtx->ecx; break;
11752 case 2: u32EffAddr += pCtx->edx; break;
11753 case 3: u32EffAddr += pCtx->ebx; break;
11754 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11755 case 5:
11756 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11757 {
11758 u32EffAddr += pCtx->ebp;
11759 SET_SS_DEF();
11760 }
11761 else
11762 {
11763 uint32_t u32Disp;
11764 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11765 u32EffAddr += u32Disp;
11766 }
11767 break;
11768 case 6: u32EffAddr += pCtx->esi; break;
11769 case 7: u32EffAddr += pCtx->edi; break;
11770 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11771 }
11772 break;
11773 }
11774 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11775 case 6: u32EffAddr = pCtx->esi; break;
11776 case 7: u32EffAddr = pCtx->edi; break;
11777 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11778 }
11779
11780 /* Get and add the displacement. */
11781 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11782 {
11783 case 0:
11784 break;
11785 case 1:
11786 {
11787 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11788 u32EffAddr += i8Disp;
11789 break;
11790 }
11791 case 2:
11792 {
11793 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11794 u32EffAddr += u32Disp;
11795 break;
11796 }
11797 default:
11798 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11799 }
11800
11801 }
11802 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11803 *pGCPtrEff = u32EffAddr;
11804 else
11805 {
11806 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11807 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11808 }
11809 }
11810 }
11811 else
11812 {
11813 uint64_t u64EffAddr;
11814
11815 /* Handle the rip+disp32 form with no registers first. */
11816 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11817 {
11818 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11819 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11820 }
11821 else
11822 {
11823 /* Get the register (or SIB) value. */
11824 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11825 {
11826 case 0: u64EffAddr = pCtx->rax; break;
11827 case 1: u64EffAddr = pCtx->rcx; break;
11828 case 2: u64EffAddr = pCtx->rdx; break;
11829 case 3: u64EffAddr = pCtx->rbx; break;
11830 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11831 case 6: u64EffAddr = pCtx->rsi; break;
11832 case 7: u64EffAddr = pCtx->rdi; break;
11833 case 8: u64EffAddr = pCtx->r8; break;
11834 case 9: u64EffAddr = pCtx->r9; break;
11835 case 10: u64EffAddr = pCtx->r10; break;
11836 case 11: u64EffAddr = pCtx->r11; break;
11837 case 13: u64EffAddr = pCtx->r13; break;
11838 case 14: u64EffAddr = pCtx->r14; break;
11839 case 15: u64EffAddr = pCtx->r15; break;
11840 /* SIB */
11841 case 4:
11842 case 12:
11843 {
11844 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11845
11846 /* Get the index and scale it. */
11847 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11848 {
11849 case 0: u64EffAddr = pCtx->rax; break;
11850 case 1: u64EffAddr = pCtx->rcx; break;
11851 case 2: u64EffAddr = pCtx->rdx; break;
11852 case 3: u64EffAddr = pCtx->rbx; break;
11853 case 4: u64EffAddr = 0; /*none */ break;
11854 case 5: u64EffAddr = pCtx->rbp; break;
11855 case 6: u64EffAddr = pCtx->rsi; break;
11856 case 7: u64EffAddr = pCtx->rdi; break;
11857 case 8: u64EffAddr = pCtx->r8; break;
11858 case 9: u64EffAddr = pCtx->r9; break;
11859 case 10: u64EffAddr = pCtx->r10; break;
11860 case 11: u64EffAddr = pCtx->r11; break;
11861 case 12: u64EffAddr = pCtx->r12; break;
11862 case 13: u64EffAddr = pCtx->r13; break;
11863 case 14: u64EffAddr = pCtx->r14; break;
11864 case 15: u64EffAddr = pCtx->r15; break;
11865 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11866 }
11867 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11868
11869 /* add base */
11870 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11871 {
11872 case 0: u64EffAddr += pCtx->rax; break;
11873 case 1: u64EffAddr += pCtx->rcx; break;
11874 case 2: u64EffAddr += pCtx->rdx; break;
11875 case 3: u64EffAddr += pCtx->rbx; break;
11876 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11877 case 6: u64EffAddr += pCtx->rsi; break;
11878 case 7: u64EffAddr += pCtx->rdi; break;
11879 case 8: u64EffAddr += pCtx->r8; break;
11880 case 9: u64EffAddr += pCtx->r9; break;
11881 case 10: u64EffAddr += pCtx->r10; break;
11882 case 11: u64EffAddr += pCtx->r11; break;
11883 case 12: u64EffAddr += pCtx->r12; break;
11884 case 14: u64EffAddr += pCtx->r14; break;
11885 case 15: u64EffAddr += pCtx->r15; break;
11886 /* complicated encodings */
11887 case 5:
11888 case 13:
11889 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11890 {
11891 if (!pVCpu->iem.s.uRexB)
11892 {
11893 u64EffAddr += pCtx->rbp;
11894 SET_SS_DEF();
11895 }
11896 else
11897 u64EffAddr += pCtx->r13;
11898 }
11899 else
11900 {
11901 uint32_t u32Disp;
11902 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11903 u64EffAddr += (int32_t)u32Disp;
11904 }
11905 break;
11906 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11907 }
11908 break;
11909 }
11910 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11911 }
11912
11913 /* Get and add the displacement. */
11914 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11915 {
11916 case 0:
11917 break;
11918 case 1:
11919 {
11920 int8_t i8Disp;
11921 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11922 u64EffAddr += i8Disp;
11923 break;
11924 }
11925 case 2:
11926 {
11927 uint32_t u32Disp;
11928 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11929 u64EffAddr += (int32_t)u32Disp;
11930 break;
11931 }
11932 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11933 }
11934
11935 }
11936
11937 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11938 *pGCPtrEff = u64EffAddr;
11939 else
11940 {
11941 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11942 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11943 }
11944 }
11945
11946 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11947 return VINF_SUCCESS;
11948}
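/* Worked example (illustrative): with 32-bit addressing, bRm=84h means mod=10
 * (disp32 follows) and rm=100 (SIB byte follows).  A SIB byte of 98h decodes as
 * scale=10 (x4), index=011 (EBX) and base=000 (EAX), so the routine above
 * returns EAX + EBX*4 + disp32.  With 16-bit addressing, bRm=42h (mod=01,
 * rm=010) yields BP + SI + disp8 and makes SS the default segment.
 */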
11949
11950
11951/**
11952 * Calculates the effective address of a ModR/M memory operand.
11953 *
11954 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11955 *
11956 * @return Strict VBox status code.
11957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11958 * @param bRm The ModRM byte.
11959 * @param cbImm The size of any immediate following the
11960 * effective address opcode bytes. Important for
11961 * RIP relative addressing.
11962 * @param pGCPtrEff Where to return the effective address.
11963 * @param offRsp RSP displacement.
11964 */
11965IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11966{
11967 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11968 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11969# define SET_SS_DEF() \
11970 do \
11971 { \
11972 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11973 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11974 } while (0)
11975
11976 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11977 {
11978/** @todo Check the effective address size crap! */
11979 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11980 {
11981 uint16_t u16EffAddr;
11982
11983 /* Handle the disp16 form with no registers first. */
11984 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11985 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11986 else
11987 {
11988 /* Get the displacement. */
11989 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11990 {
11991 case 0: u16EffAddr = 0; break;
11992 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11993 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11994 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11995 }
11996
11997 /* Add the base and index registers to the disp. */
11998 switch (bRm & X86_MODRM_RM_MASK)
11999 {
12000 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12001 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12002 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12003 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12004 case 4: u16EffAddr += pCtx->si; break;
12005 case 5: u16EffAddr += pCtx->di; break;
12006 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12007 case 7: u16EffAddr += pCtx->bx; break;
12008 }
12009 }
12010
12011 *pGCPtrEff = u16EffAddr;
12012 }
12013 else
12014 {
12015 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12016 uint32_t u32EffAddr;
12017
12018 /* Handle the disp32 form with no registers first. */
12019 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12020 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12021 else
12022 {
12023 /* Get the register (or SIB) value. */
12024 switch ((bRm & X86_MODRM_RM_MASK))
12025 {
12026 case 0: u32EffAddr = pCtx->eax; break;
12027 case 1: u32EffAddr = pCtx->ecx; break;
12028 case 2: u32EffAddr = pCtx->edx; break;
12029 case 3: u32EffAddr = pCtx->ebx; break;
12030 case 4: /* SIB */
12031 {
12032 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12033
12034 /* Get the index and scale it. */
12035 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12036 {
12037 case 0: u32EffAddr = pCtx->eax; break;
12038 case 1: u32EffAddr = pCtx->ecx; break;
12039 case 2: u32EffAddr = pCtx->edx; break;
12040 case 3: u32EffAddr = pCtx->ebx; break;
12041 case 4: u32EffAddr = 0; /*none */ break;
12042 case 5: u32EffAddr = pCtx->ebp; break;
12043 case 6: u32EffAddr = pCtx->esi; break;
12044 case 7: u32EffAddr = pCtx->edi; break;
12045 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12046 }
12047 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12048
12049 /* add base */
12050 switch (bSib & X86_SIB_BASE_MASK)
12051 {
12052 case 0: u32EffAddr += pCtx->eax; break;
12053 case 1: u32EffAddr += pCtx->ecx; break;
12054 case 2: u32EffAddr += pCtx->edx; break;
12055 case 3: u32EffAddr += pCtx->ebx; break;
12056 case 4:
12057 u32EffAddr += pCtx->esp + offRsp;
12058 SET_SS_DEF();
12059 break;
12060 case 5:
12061 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12062 {
12063 u32EffAddr += pCtx->ebp;
12064 SET_SS_DEF();
12065 }
12066 else
12067 {
12068 uint32_t u32Disp;
12069 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12070 u32EffAddr += u32Disp;
12071 }
12072 break;
12073 case 6: u32EffAddr += pCtx->esi; break;
12074 case 7: u32EffAddr += pCtx->edi; break;
12075 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12076 }
12077 break;
12078 }
12079 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12080 case 6: u32EffAddr = pCtx->esi; break;
12081 case 7: u32EffAddr = pCtx->edi; break;
12082 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12083 }
12084
12085 /* Get and add the displacement. */
12086 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12087 {
12088 case 0:
12089 break;
12090 case 1:
12091 {
12092 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12093 u32EffAddr += i8Disp;
12094 break;
12095 }
12096 case 2:
12097 {
12098 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12099 u32EffAddr += u32Disp;
12100 break;
12101 }
12102 default:
12103 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12104 }
12105
12106 }
12107 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12108 *pGCPtrEff = u32EffAddr;
12109 else
12110 {
12111 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12112 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12113 }
12114 }
12115 }
12116 else
12117 {
12118 uint64_t u64EffAddr;
12119
12120 /* Handle the rip+disp32 form with no registers first. */
12121 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12122 {
12123 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12124 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12125 }
12126 else
12127 {
12128 /* Get the register (or SIB) value. */
12129 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12130 {
12131 case 0: u64EffAddr = pCtx->rax; break;
12132 case 1: u64EffAddr = pCtx->rcx; break;
12133 case 2: u64EffAddr = pCtx->rdx; break;
12134 case 3: u64EffAddr = pCtx->rbx; break;
12135 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12136 case 6: u64EffAddr = pCtx->rsi; break;
12137 case 7: u64EffAddr = pCtx->rdi; break;
12138 case 8: u64EffAddr = pCtx->r8; break;
12139 case 9: u64EffAddr = pCtx->r9; break;
12140 case 10: u64EffAddr = pCtx->r10; break;
12141 case 11: u64EffAddr = pCtx->r11; break;
12142 case 13: u64EffAddr = pCtx->r13; break;
12143 case 14: u64EffAddr = pCtx->r14; break;
12144 case 15: u64EffAddr = pCtx->r15; break;
12145 /* SIB */
12146 case 4:
12147 case 12:
12148 {
12149 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12150
12151 /* Get the index and scale it. */
12152 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12153 {
12154 case 0: u64EffAddr = pCtx->rax; break;
12155 case 1: u64EffAddr = pCtx->rcx; break;
12156 case 2: u64EffAddr = pCtx->rdx; break;
12157 case 3: u64EffAddr = pCtx->rbx; break;
12158 case 4: u64EffAddr = 0; /*none */ break;
12159 case 5: u64EffAddr = pCtx->rbp; break;
12160 case 6: u64EffAddr = pCtx->rsi; break;
12161 case 7: u64EffAddr = pCtx->rdi; break;
12162 case 8: u64EffAddr = pCtx->r8; break;
12163 case 9: u64EffAddr = pCtx->r9; break;
12164 case 10: u64EffAddr = pCtx->r10; break;
12165 case 11: u64EffAddr = pCtx->r11; break;
12166 case 12: u64EffAddr = pCtx->r12; break;
12167 case 13: u64EffAddr = pCtx->r13; break;
12168 case 14: u64EffAddr = pCtx->r14; break;
12169 case 15: u64EffAddr = pCtx->r15; break;
12170 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12171 }
12172 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12173
12174 /* add base */
12175 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12176 {
12177 case 0: u64EffAddr += pCtx->rax; break;
12178 case 1: u64EffAddr += pCtx->rcx; break;
12179 case 2: u64EffAddr += pCtx->rdx; break;
12180 case 3: u64EffAddr += pCtx->rbx; break;
12181 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12182 case 6: u64EffAddr += pCtx->rsi; break;
12183 case 7: u64EffAddr += pCtx->rdi; break;
12184 case 8: u64EffAddr += pCtx->r8; break;
12185 case 9: u64EffAddr += pCtx->r9; break;
12186 case 10: u64EffAddr += pCtx->r10; break;
12187 case 11: u64EffAddr += pCtx->r11; break;
12188 case 12: u64EffAddr += pCtx->r12; break;
12189 case 14: u64EffAddr += pCtx->r14; break;
12190 case 15: u64EffAddr += pCtx->r15; break;
12191 /* complicated encodings */
12192 case 5:
12193 case 13:
12194 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12195 {
12196 if (!pVCpu->iem.s.uRexB)
12197 {
12198 u64EffAddr += pCtx->rbp;
12199 SET_SS_DEF();
12200 }
12201 else
12202 u64EffAddr += pCtx->r13;
12203 }
12204 else
12205 {
12206 uint32_t u32Disp;
12207 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12208 u64EffAddr += (int32_t)u32Disp;
12209 }
12210 break;
12211 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12212 }
12213 break;
12214 }
12215 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12216 }
12217
12218 /* Get and add the displacement. */
12219 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12220 {
12221 case 0:
12222 break;
12223 case 1:
12224 {
12225 int8_t i8Disp;
12226 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12227 u64EffAddr += i8Disp;
12228 break;
12229 }
12230 case 2:
12231 {
12232 uint32_t u32Disp;
12233 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12234 u64EffAddr += (int32_t)u32Disp;
12235 break;
12236 }
12237 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12238 }
12239
12240 }
12241
12242 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12243 *pGCPtrEff = u64EffAddr;
12244 else
12245 {
12246 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12247 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12248 }
12249 }
12250
12251    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12252 return VINF_SUCCESS;
12253}
12254
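/*
 * Illustrative aside: the 16-bit ModR/M forms handled above pair a displacement
 * with one of eight fixed base/index combinations, with mod=00 rm=110 meaning a
 * bare disp16.  The self-contained sketch below is a simplified illustration of
 * that table only; the EXAMPLEREGS16 type and the example* names are made up
 * and are not part of IEM.
 */
#if 0 /* standalone sketch, not built */
# include <stdint.h>
# include <stdio.h>

typedef struct EXAMPLEREGS16
{
    uint16_t bx, si, di, bp;   /* only the registers the 16-bit forms can use */
} EXAMPLEREGS16;

/* Mirrors the mod != 3 16-bit table: rm picks the base/index pair, the
   displacement is assumed to be fetched and sign-extended already, and the
   result wraps at 16 bits just like u16EffAddr does above. */
static uint16_t exampleCalc16BitEffAddr(uint8_t bRm, uint16_t u16Disp, EXAMPLEREGS16 const *pRegs)
{
    if ((bRm & 0xc7) == 0x06)          /* mod=00, rm=110: plain disp16 */
        return u16Disp;
    uint16_t u16EffAddr = u16Disp;
    switch (bRm & 0x07)
    {
        case 0: u16EffAddr += pRegs->bx + pRegs->si; break;
        case 1: u16EffAddr += pRegs->bx + pRegs->di; break;
        case 2: u16EffAddr += pRegs->bp + pRegs->si; break; /* SS default */
        case 3: u16EffAddr += pRegs->bp + pRegs->di; break; /* SS default */
        case 4: u16EffAddr += pRegs->si; break;
        case 5: u16EffAddr += pRegs->di; break;
        case 6: u16EffAddr += pRegs->bp; break;             /* SS default */
        case 7: u16EffAddr += pRegs->bx; break;
    }
    return u16EffAddr;
}

int main(void)
{
    EXAMPLEREGS16 Regs = { /*bx*/ 0x1000, /*si*/ 0x0234, /*di*/ 0, /*bp*/ 0 };
    /* mod=01 rm=000 -> [BX+SI+disp8]; disp8 0x10 already sign-extended. */
    printf("ea=%#06x\n", exampleCalc16BitEffAddr(0x40, 0x0010, &Regs)); /* prints 0x1244 */
    return 0;
}
#endif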
12255
12256#ifdef IEM_WITH_SETJMP
12257/**
12258 * Calculates the effective address of a ModR/M memory operand.
12259 *
12260 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12261 *
12262 * May longjmp on internal error.
12263 *
12264 * @return The effective address.
12265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12266 * @param bRm The ModRM byte.
12267 * @param cbImm The size of any immediate following the
12268 * effective address opcode bytes. Important for
12269 * RIP relative addressing.
12270 */
12271IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12272{
12273 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12274 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12275# define SET_SS_DEF() \
12276 do \
12277 { \
12278 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12279 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12280 } while (0)
12281
12282 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12283 {
12284/** @todo Check the effective address size crap! */
12285 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12286 {
12287 uint16_t u16EffAddr;
12288
12289 /* Handle the disp16 form with no registers first. */
12290 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12291 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12292 else
12293 {
12294                /* Get the displacement. */
12295 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12296 {
12297 case 0: u16EffAddr = 0; break;
12298 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12299 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12300 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12301 }
12302
12303 /* Add the base and index registers to the disp. */
12304 switch (bRm & X86_MODRM_RM_MASK)
12305 {
12306 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12307 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12308 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12309 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12310 case 4: u16EffAddr += pCtx->si; break;
12311 case 5: u16EffAddr += pCtx->di; break;
12312 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12313 case 7: u16EffAddr += pCtx->bx; break;
12314 }
12315 }
12316
12317 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12318 return u16EffAddr;
12319 }
12320
12321 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12322 uint32_t u32EffAddr;
12323
12324 /* Handle the disp32 form with no registers first. */
12325 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12326 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12327 else
12328 {
12329 /* Get the register (or SIB) value. */
12330 switch ((bRm & X86_MODRM_RM_MASK))
12331 {
12332 case 0: u32EffAddr = pCtx->eax; break;
12333 case 1: u32EffAddr = pCtx->ecx; break;
12334 case 2: u32EffAddr = pCtx->edx; break;
12335 case 3: u32EffAddr = pCtx->ebx; break;
12336 case 4: /* SIB */
12337 {
12338 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12339
12340 /* Get the index and scale it. */
12341 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12342 {
12343 case 0: u32EffAddr = pCtx->eax; break;
12344 case 1: u32EffAddr = pCtx->ecx; break;
12345 case 2: u32EffAddr = pCtx->edx; break;
12346 case 3: u32EffAddr = pCtx->ebx; break;
12347 case 4: u32EffAddr = 0; /*none */ break;
12348 case 5: u32EffAddr = pCtx->ebp; break;
12349 case 6: u32EffAddr = pCtx->esi; break;
12350 case 7: u32EffAddr = pCtx->edi; break;
12351 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12352 }
12353 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12354
12355 /* add base */
12356 switch (bSib & X86_SIB_BASE_MASK)
12357 {
12358 case 0: u32EffAddr += pCtx->eax; break;
12359 case 1: u32EffAddr += pCtx->ecx; break;
12360 case 2: u32EffAddr += pCtx->edx; break;
12361 case 3: u32EffAddr += pCtx->ebx; break;
12362 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12363 case 5:
12364 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12365 {
12366 u32EffAddr += pCtx->ebp;
12367 SET_SS_DEF();
12368 }
12369 else
12370 {
12371 uint32_t u32Disp;
12372 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12373 u32EffAddr += u32Disp;
12374 }
12375 break;
12376 case 6: u32EffAddr += pCtx->esi; break;
12377 case 7: u32EffAddr += pCtx->edi; break;
12378 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12379 }
12380 break;
12381 }
12382 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12383 case 6: u32EffAddr = pCtx->esi; break;
12384 case 7: u32EffAddr = pCtx->edi; break;
12385 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12386 }
12387
12388 /* Get and add the displacement. */
12389 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12390 {
12391 case 0:
12392 break;
12393 case 1:
12394 {
12395 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12396 u32EffAddr += i8Disp;
12397 break;
12398 }
12399 case 2:
12400 {
12401 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12402 u32EffAddr += u32Disp;
12403 break;
12404 }
12405 default:
12406 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12407 }
12408 }
12409
12410 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12411 {
12412 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12413 return u32EffAddr;
12414 }
12415 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12416 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12417 return u32EffAddr & UINT16_MAX;
12418 }
12419
12420 uint64_t u64EffAddr;
12421
12422 /* Handle the rip+disp32 form with no registers first. */
12423 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12424 {
12425 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12426 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12427 }
12428 else
12429 {
12430 /* Get the register (or SIB) value. */
12431 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12432 {
12433 case 0: u64EffAddr = pCtx->rax; break;
12434 case 1: u64EffAddr = pCtx->rcx; break;
12435 case 2: u64EffAddr = pCtx->rdx; break;
12436 case 3: u64EffAddr = pCtx->rbx; break;
12437 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12438 case 6: u64EffAddr = pCtx->rsi; break;
12439 case 7: u64EffAddr = pCtx->rdi; break;
12440 case 8: u64EffAddr = pCtx->r8; break;
12441 case 9: u64EffAddr = pCtx->r9; break;
12442 case 10: u64EffAddr = pCtx->r10; break;
12443 case 11: u64EffAddr = pCtx->r11; break;
12444 case 13: u64EffAddr = pCtx->r13; break;
12445 case 14: u64EffAddr = pCtx->r14; break;
12446 case 15: u64EffAddr = pCtx->r15; break;
12447 /* SIB */
12448 case 4:
12449 case 12:
12450 {
12451 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12452
12453 /* Get the index and scale it. */
12454 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12455 {
12456 case 0: u64EffAddr = pCtx->rax; break;
12457 case 1: u64EffAddr = pCtx->rcx; break;
12458 case 2: u64EffAddr = pCtx->rdx; break;
12459 case 3: u64EffAddr = pCtx->rbx; break;
12460 case 4: u64EffAddr = 0; /*none */ break;
12461 case 5: u64EffAddr = pCtx->rbp; break;
12462 case 6: u64EffAddr = pCtx->rsi; break;
12463 case 7: u64EffAddr = pCtx->rdi; break;
12464 case 8: u64EffAddr = pCtx->r8; break;
12465 case 9: u64EffAddr = pCtx->r9; break;
12466 case 10: u64EffAddr = pCtx->r10; break;
12467 case 11: u64EffAddr = pCtx->r11; break;
12468 case 12: u64EffAddr = pCtx->r12; break;
12469 case 13: u64EffAddr = pCtx->r13; break;
12470 case 14: u64EffAddr = pCtx->r14; break;
12471 case 15: u64EffAddr = pCtx->r15; break;
12472 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12473 }
12474 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12475
12476 /* add base */
12477 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12478 {
12479 case 0: u64EffAddr += pCtx->rax; break;
12480 case 1: u64EffAddr += pCtx->rcx; break;
12481 case 2: u64EffAddr += pCtx->rdx; break;
12482 case 3: u64EffAddr += pCtx->rbx; break;
12483 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12484 case 6: u64EffAddr += pCtx->rsi; break;
12485 case 7: u64EffAddr += pCtx->rdi; break;
12486 case 8: u64EffAddr += pCtx->r8; break;
12487 case 9: u64EffAddr += pCtx->r9; break;
12488 case 10: u64EffAddr += pCtx->r10; break;
12489 case 11: u64EffAddr += pCtx->r11; break;
12490 case 12: u64EffAddr += pCtx->r12; break;
12491 case 14: u64EffAddr += pCtx->r14; break;
12492 case 15: u64EffAddr += pCtx->r15; break;
12493 /* complicated encodings */
12494 case 5:
12495 case 13:
12496 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12497 {
12498 if (!pVCpu->iem.s.uRexB)
12499 {
12500 u64EffAddr += pCtx->rbp;
12501 SET_SS_DEF();
12502 }
12503 else
12504 u64EffAddr += pCtx->r13;
12505 }
12506 else
12507 {
12508 uint32_t u32Disp;
12509 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12510 u64EffAddr += (int32_t)u32Disp;
12511 }
12512 break;
12513 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12514 }
12515 break;
12516 }
12517 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12518 }
12519
12520 /* Get and add the displacement. */
12521 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12522 {
12523 case 0:
12524 break;
12525 case 1:
12526 {
12527 int8_t i8Disp;
12528 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12529 u64EffAddr += i8Disp;
12530 break;
12531 }
12532 case 2:
12533 {
12534 uint32_t u32Disp;
12535 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12536 u64EffAddr += (int32_t)u32Disp;
12537 break;
12538 }
12539 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12540 }
12541
12542 }
12543
12544 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12545 {
12546 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12547 return u64EffAddr;
12548 }
12549 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12550 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12551 return u64EffAddr & UINT32_MAX;
12552}
12553#endif /* IEM_WITH_SETJMP */
12554
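/*
 * Illustrative aside: the rip+disp32 form above is relative to the address of
 * the *next* instruction, which is why the helpers take cbImm separately - the
 * immediate bytes have not been fetched yet when the disp32 is decoded.  The
 * sketch below is a simplified illustration only; the example* names and the
 * sample encoding are hypothetical.
 */
#if 0 /* standalone sketch, not built */
# include <stdint.h>
# include <stdio.h>

/* EffAddr = RIP of the instruction + opcode bytes decoded so far + trailing
   immediate size + sign-extended disp32, i.e. relative to the instruction end. */
static uint64_t exampleCalcRipRelAddr(uint64_t uRipAtInstrStart, uint8_t cbOpcodesSoFar,
                                      uint8_t cbImm, int32_t i32Disp)
{
    return uRipAtInstrStart + cbOpcodesSoFar + cbImm + (int64_t)i32Disp;
}

int main(void)
{
    /* Hypothetical: C7 05 <disp32> <imm32> = mov dword [rip+disp32], imm32, 10 bytes. */
    uint64_t const uAddr = exampleCalcRipRelAddr(UINT64_C(0x401000),
                                                 6 /* opcode+modrm+disp32 */,
                                                 4 /* imm32 */,
                                                 0x100);
    printf("target=%#llx\n", (unsigned long long)uAddr); /* prints 0x40110a */
    return 0;
}
#endif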
12555
12556/** @} */
12557
12558
12559
12560/*
12561 * Include the instructions
12562 */
12563#include "IEMAllInstructions.cpp.h"
12564
12565
12566
12567
12568#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12569
12570/**
12571 * Sets up execution verification mode.
12572 */
12573IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12574{
12576 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12577
12578 /*
12579 * Always note down the address of the current instruction.
12580 */
12581 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12582 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12583
12584 /*
12585 * Enable verification and/or logging.
12586 */
12587    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12588 if ( fNewNoRem
12589 && ( 0
12590#if 0 /* auto enable on first paged protected mode interrupt */
12591 || ( pOrgCtx->eflags.Bits.u1IF
12592 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12593 && TRPMHasTrap(pVCpu)
12594 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12595#endif
12596#if 0
12597        || ( pOrgCtx->cs.Sel == 0x10
12598            && (   pOrgCtx->rip == 0x90119e3e
12599                || pOrgCtx->rip == 0x901d9810))
12600#endif
12601#if 0 /* Auto enable DSL - FPU stuff. */
12602        || ( pOrgCtx->cs.Sel == 0x10
12603 && (// pOrgCtx->rip == 0xc02ec07f
12604 //|| pOrgCtx->rip == 0xc02ec082
12605 //|| pOrgCtx->rip == 0xc02ec0c9
12606 0
12607 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12608#endif
12609#if 0 /* Auto enable DSL - fstp st0 stuff. */
12610        || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12611#endif
12612#if 0
12613 || pOrgCtx->rip == 0x9022bb3a
12614#endif
12615#if 0
12616 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12617#endif
12618#if 0
12619 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12620 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12621#endif
12622#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12623 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12624 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12625 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12626#endif
12627#if 0 /* NT4SP1 - xadd early boot. */
12628 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12629#endif
12630#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12631 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12632#endif
12633#if 0 /* NT4SP1 - cmpxchg (AMD). */
12634 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12635#endif
12636#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12637 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12638#endif
12639#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12640 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12641
12642#endif
12643#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12644 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12645
12646#endif
12647#if 0 /* NT4SP1 - frstor [ecx] */
12648 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12649#endif
12650#if 0 /* xxxxxx - All long mode code. */
12651 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12652#endif
12653#if 0 /* rep movsq linux 3.7 64-bit boot. */
12654 || (pOrgCtx->rip == 0x0000000000100241)
12655#endif
12656#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12657 || (pOrgCtx->rip == 0x000000000215e240)
12658#endif
12659#if 0 /* DOS's size-overridden iret to v8086. */
12660 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12661#endif
12662 )
12663 )
12664 {
12665 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12666 RTLogFlags(NULL, "enabled");
12667 fNewNoRem = false;
12668 }
12669 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12670 {
12671 pVCpu->iem.s.fNoRem = fNewNoRem;
12672 if (!fNewNoRem)
12673 {
12674 LogAlways(("Enabling verification mode!\n"));
12675 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12676 }
12677 else
12678 LogAlways(("Disabling verification mode!\n"));
12679 }
12680
12681 /*
12682 * Switch state.
12683 */
12684 if (IEM_VERIFICATION_ENABLED(pVCpu))
12685 {
12686 static CPUMCTX s_DebugCtx; /* Ugly! */
12687
12688 s_DebugCtx = *pOrgCtx;
12689 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12690 }
12691
12692 /*
12693 * See if there is an interrupt pending in TRPM and inject it if we can.
12694 */
12695 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12696 if ( pOrgCtx->eflags.Bits.u1IF
12697 && TRPMHasTrap(pVCpu)
12698 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12699 {
12700 uint8_t u8TrapNo;
12701 TRPMEVENT enmType;
12702 RTGCUINT uErrCode;
12703 RTGCPTR uCr2;
12704 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12705 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12706 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12707 TRPMResetTrap(pVCpu);
12708 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12709 }
12710
12711 /*
12712 * Reset the counters.
12713 */
12714 pVCpu->iem.s.cIOReads = 0;
12715 pVCpu->iem.s.cIOWrites = 0;
12716 pVCpu->iem.s.fIgnoreRaxRdx = false;
12717 pVCpu->iem.s.fOverlappingMovs = false;
12718 pVCpu->iem.s.fProblematicMemory = false;
12719 pVCpu->iem.s.fUndefinedEFlags = 0;
12720
12721 if (IEM_VERIFICATION_ENABLED(pVCpu))
12722 {
12723 /*
12724 * Free all verification records.
12725 */
12726 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12727 pVCpu->iem.s.pIemEvtRecHead = NULL;
12728 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12729 do
12730 {
12731 while (pEvtRec)
12732 {
12733 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12734 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12735 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12736 pEvtRec = pNext;
12737 }
12738 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12739 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12740 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12741 } while (pEvtRec);
12742 }
12743}
12744
12745
12746/**
12747 * Allocate an event record.
12748 * @returns Pointer to a record.
12749 */
12750IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12751{
12752 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12753 return NULL;
12754
12755 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12756 if (pEvtRec)
12757 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12758 else
12759 {
12760 if (!pVCpu->iem.s.ppIemEvtRecNext)
12761 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12762
12763 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12764 if (!pEvtRec)
12765 return NULL;
12766 }
12767 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12768 pEvtRec->pNext = NULL;
12769 return pEvtRec;
12770}
12771
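/*
 * Illustrative aside: iemVerifyAllocRecord above recycles records through a
 * LIFO free list and only falls back to the heap allocator when the list is
 * empty.  Below is a minimal, self-contained version of that pattern; the
 * EXAMPLEEVTREC type and example* names are made up, and plain malloc stands
 * in for MMR3HeapAlloc.
 */
#if 0 /* standalone sketch, not built */
# include <stdlib.h>

typedef struct EXAMPLEEVTREC
{
    struct EXAMPLEEVTREC *pNext;    /* doubles as the free-list link */
    int                   enmEvent;
} EXAMPLEEVTREC;

static EXAMPLEEVTREC *g_pFreeList = NULL;   /* LIFO free list head */

/* Pop a recycled record if one is available, otherwise allocate a fresh one. */
static EXAMPLEEVTREC *exampleAllocRecord(void)
{
    EXAMPLEEVTREC *pRec = g_pFreeList;
    if (pRec)
        g_pFreeList = pRec->pNext;
    else
    {
        pRec = (EXAMPLEEVTREC *)malloc(sizeof(*pRec));
        if (!pRec)
            return NULL;
    }
    pRec->enmEvent = 0;     /* invalid until the caller fills it in */
    pRec->pNext    = NULL;
    return pRec;
}

/* Push a record back onto the free list instead of freeing it. */
static void exampleFreeRecord(EXAMPLEEVTREC *pRec)
{
    pRec->pNext = g_pFreeList;
    g_pFreeList = pRec;
}

int main(void)
{
    EXAMPLEEVTREC *pRec = exampleAllocRecord();
    if (!pRec)
        return 1;
    exampleFreeRecord(pRec);                      /* goes back on the free list... */
    return exampleAllocRecord() == pRec ? 0 : 1;  /* ...and is handed out again */
}
#endif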
12772
12773/**
12774 * IOMMMIORead notification.
12775 */
12776VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12777{
12778 PVMCPU pVCpu = VMMGetCpu(pVM);
12779 if (!pVCpu)
12780 return;
12781 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12782 if (!pEvtRec)
12783 return;
12784 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12785 pEvtRec->u.RamRead.GCPhys = GCPhys;
12786 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12787 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12788 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12789}
12790
12791
12792/**
12793 * IOMMMIOWrite notification.
12794 */
12795VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12796{
12797 PVMCPU pVCpu = VMMGetCpu(pVM);
12798 if (!pVCpu)
12799 return;
12800 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12801 if (!pEvtRec)
12802 return;
12803 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12804 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12805 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12806 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12807 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12808 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12809 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12810 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12811 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12812}
12813
12814
12815/**
12816 * IOMIOPortRead notification.
12817 */
12818VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12819{
12820 PVMCPU pVCpu = VMMGetCpu(pVM);
12821 if (!pVCpu)
12822 return;
12823 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12824 if (!pEvtRec)
12825 return;
12826 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12827 pEvtRec->u.IOPortRead.Port = Port;
12828 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12829 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12830 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12831}
12832
12833/**
12834 * IOMIOPortWrite notification.
12835 */
12836VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12837{
12838 PVMCPU pVCpu = VMMGetCpu(pVM);
12839 if (!pVCpu)
12840 return;
12841 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12842 if (!pEvtRec)
12843 return;
12844 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12845 pEvtRec->u.IOPortWrite.Port = Port;
12846 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12847 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12848 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12849 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12850}
12851
12852
12853VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12854{
12855 PVMCPU pVCpu = VMMGetCpu(pVM);
12856 if (!pVCpu)
12857 return;
12858 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12859 if (!pEvtRec)
12860 return;
12861 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12862 pEvtRec->u.IOPortStrRead.Port = Port;
12863 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12864 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12865 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12866 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12867}
12868
12869
12870VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12871{
12872 PVMCPU pVCpu = VMMGetCpu(pVM);
12873 if (!pVCpu)
12874 return;
12875 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12876 if (!pEvtRec)
12877 return;
12878 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12879 pEvtRec->u.IOPortStrWrite.Port = Port;
12880 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12881 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12882 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12883 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12884}
12885
12886
12887/**
12888 * Fakes and records an I/O port read.
12889 *
12890 * @returns VINF_SUCCESS.
12891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12892 * @param Port The I/O port.
12893 * @param pu32Value Where to store the fake value.
12894 * @param cbValue The size of the access.
12895 */
12896IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12897{
12898 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12899 if (pEvtRec)
12900 {
12901 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12902 pEvtRec->u.IOPortRead.Port = Port;
12903 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12904 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12905 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12906 }
12907 pVCpu->iem.s.cIOReads++;
12908 *pu32Value = 0xcccccccc;
12909 return VINF_SUCCESS;
12910}
12911
12912
12913/**
12914 * Fakes and records an I/O port write.
12915 *
12916 * @returns VINF_SUCCESS.
12917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12918 * @param Port The I/O port.
12919 * @param u32Value The value being written.
12920 * @param cbValue The size of the access.
12921 */
12922IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12923{
12924 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12925 if (pEvtRec)
12926 {
12927 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12928 pEvtRec->u.IOPortWrite.Port = Port;
12929 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12930 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12931 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12932 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12933 }
12934 pVCpu->iem.s.cIOWrites++;
12935 return VINF_SUCCESS;
12936}
12937
12938
12939/**
12940 * Used to add extra details about a stub case.
12941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12942 */
12943IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12944{
12945 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12946 PVM pVM = pVCpu->CTX_SUFF(pVM);
12948 char szRegs[4096];
12949 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12950 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12951 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12952 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12953 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12954 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12955 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12956 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12957 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12958 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12959 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12960 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12961 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12962 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12963 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12964 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12965 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12966 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12967 " efer=%016VR{efer}\n"
12968 " pat=%016VR{pat}\n"
12969 " sf_mask=%016VR{sf_mask}\n"
12970 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12971 " lstar=%016VR{lstar}\n"
12972 " star=%016VR{star} cstar=%016VR{cstar}\n"
12973 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12974 );
12975
12976 char szInstr1[256];
12977 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12978 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12979 szInstr1, sizeof(szInstr1), NULL);
12980 char szInstr2[256];
12981 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12982 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12983 szInstr2, sizeof(szInstr2), NULL);
12984
12985 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12986}
12987
12988
12989/**
12990 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12991 * dump to the assertion info.
12992 *
12993 * @param pEvtRec The record to dump.
12994 */
12995IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12996{
12997 switch (pEvtRec->enmEvent)
12998 {
12999 case IEMVERIFYEVENT_IOPORT_READ:
13000 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13001                             pEvtRec->u.IOPortRead.Port,
13002                             pEvtRec->u.IOPortRead.cbValue);
13003 break;
13004 case IEMVERIFYEVENT_IOPORT_WRITE:
13005 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13006 pEvtRec->u.IOPortWrite.Port,
13007 pEvtRec->u.IOPortWrite.cbValue,
13008 pEvtRec->u.IOPortWrite.u32Value);
13009 break;
13010 case IEMVERIFYEVENT_IOPORT_STR_READ:
13011 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13012                             pEvtRec->u.IOPortStrRead.Port,
13013                             pEvtRec->u.IOPortStrRead.cbValue,
13014                             pEvtRec->u.IOPortStrRead.cTransfers);
13015 break;
13016 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13017 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13018 pEvtRec->u.IOPortStrWrite.Port,
13019 pEvtRec->u.IOPortStrWrite.cbValue,
13020 pEvtRec->u.IOPortStrWrite.cTransfers);
13021 break;
13022 case IEMVERIFYEVENT_RAM_READ:
13023 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13024 pEvtRec->u.RamRead.GCPhys,
13025 pEvtRec->u.RamRead.cb);
13026 break;
13027 case IEMVERIFYEVENT_RAM_WRITE:
13028 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13029 pEvtRec->u.RamWrite.GCPhys,
13030 pEvtRec->u.RamWrite.cb,
13031 (int)pEvtRec->u.RamWrite.cb,
13032 pEvtRec->u.RamWrite.ab);
13033 break;
13034 default:
13035 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13036 break;
13037 }
13038}
13039
13040
13041/**
13042 * Raises an assertion on the specified records, showing the given message with
13043 * dumps of both records attached.
13044 *
13045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13046 * @param pEvtRec1 The first record.
13047 * @param pEvtRec2 The second record.
13048 * @param pszMsg The message explaining why we're asserting.
13049 */
13050IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13051{
13052 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13053 iemVerifyAssertAddRecordDump(pEvtRec1);
13054 iemVerifyAssertAddRecordDump(pEvtRec2);
13055 iemVerifyAssertMsg2(pVCpu);
13056 RTAssertPanic();
13057}
13058
13059
13060/**
13061 * Raises an assertion on the specified record, showing the given message with
13062 * a record dump attached.
13063 *
13064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13065 * @param pEvtRec The record to dump with the assertion.
13066 * @param pszMsg The message explaining why we're asserting.
13067 */
13068IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13069{
13070 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13071 iemVerifyAssertAddRecordDump(pEvtRec);
13072 iemVerifyAssertMsg2(pVCpu);
13073 RTAssertPanic();
13074}
13075
13076
13077/**
13078 * Verifies a write record.
13079 *
13080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13081 * @param pEvtRec The write record.
13082 * @param pEvtRec The write record.
13083 * @param fRem Set if REM was doing the other execution; clear if it was HM.
13084 */
13085IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13086{
13087 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13088 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13089 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13090 if ( RT_FAILURE(rc)
13091 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13092 {
13093 /* fend off ins */
13094 if ( !pVCpu->iem.s.cIOReads
13095 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13096 || ( pEvtRec->u.RamWrite.cb != 1
13097 && pEvtRec->u.RamWrite.cb != 2
13098 && pEvtRec->u.RamWrite.cb != 4) )
13099 {
13100 /* fend off ROMs and MMIO */
13101 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13102 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13103 {
13104 /* fend off fxsave */
13105 if (pEvtRec->u.RamWrite.cb != 512)
13106 {
13107 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13108 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13109 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13110 RTAssertMsg2Add("%s: %.*Rhxs\n"
13111 "iem: %.*Rhxs\n",
13112 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13113 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13114 iemVerifyAssertAddRecordDump(pEvtRec);
13115 iemVerifyAssertMsg2(pVCpu);
13116 RTAssertPanic();
13117 }
13118 }
13119 }
13120 }
13121
13122}
13123
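/*
 * Illustrative aside: the ROM/MMIO fend-off in iemVerifyWriteRecord above uses
 * the usual unsigned-wraparound idiom - "addr - base > len" rejects everything
 * outside [base, base + len] with a single compare, since addresses below base
 * wrap to huge values.  The sketch below only illustrates the idiom; the
 * example* names are made up.
 */
#if 0 /* standalone sketch, not built */
# include <stdint.h>
# include <stdio.h>

/* True when uBase <= uAddr && uAddr <= uBase + cbLen, using one compare. */
static int exampleIsInRange(uint64_t uAddr, uint64_t uBase, uint64_t cbLen)
{
    return uAddr - uBase <= cbLen;
}

int main(void)
{
    /* The legacy VGA/ROM window checked above: 0xa0000..0x100000. */
    printf("%d %d %d\n",
           exampleIsInRange(UINT64_C(0x0009f000), UINT64_C(0xa0000), UINT64_C(0x60000)),  /* 0: below  */
           exampleIsInRange(UINT64_C(0x000b8000), UINT64_C(0xa0000), UINT64_C(0x60000)),  /* 1: inside */
           exampleIsInRange(UINT64_C(0x00200000), UINT64_C(0xa0000), UINT64_C(0x60000))); /* 0: above  */
    return 0;
}
#endif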
13124/**
13125 * Performs the post-execution verification checks.
13126 */
13127IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13128{
13129 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13130 return rcStrictIem;
13131
13132 /*
13133 * Switch back the state.
13134 */
13135 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13136 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13137 Assert(pOrgCtx != pDebugCtx);
13138 IEM_GET_CTX(pVCpu) = pOrgCtx;
13139
13140 /*
13141 * Execute the instruction in REM.
13142 */
13143 bool fRem = false;
13144 PVM pVM = pVCpu->CTX_SUFF(pVM);
13146 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13147#ifdef IEM_VERIFICATION_MODE_FULL_HM
13148 if ( HMIsEnabled(pVM)
13149 && pVCpu->iem.s.cIOReads == 0
13150 && pVCpu->iem.s.cIOWrites == 0
13151 && !pVCpu->iem.s.fProblematicMemory)
13152 {
13153 uint64_t uStartRip = pOrgCtx->rip;
13154 unsigned iLoops = 0;
13155 do
13156 {
13157 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13158 iLoops++;
13159 } while ( rc == VINF_SUCCESS
13160 || ( rc == VINF_EM_DBG_STEPPED
13161 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13162 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13163 || ( pOrgCtx->rip != pDebugCtx->rip
13164 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13165 && iLoops < 8) );
13166 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13167 rc = VINF_SUCCESS;
13168 }
13169#endif
13170 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13171 || rc == VINF_IOM_R3_IOPORT_READ
13172 || rc == VINF_IOM_R3_IOPORT_WRITE
13173 || rc == VINF_IOM_R3_MMIO_READ
13174 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13175 || rc == VINF_IOM_R3_MMIO_WRITE
13176 || rc == VINF_CPUM_R3_MSR_READ
13177 || rc == VINF_CPUM_R3_MSR_WRITE
13178 || rc == VINF_EM_RESCHEDULE
13179 )
13180 {
13181 EMRemLock(pVM);
13182 rc = REMR3EmulateInstruction(pVM, pVCpu);
13183 AssertRC(rc);
13184 EMRemUnlock(pVM);
13185 fRem = true;
13186 }
13187
13188# if 1 /* Skip unimplemented instructions for now. */
13189 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13190 {
13191 IEM_GET_CTX(pVCpu) = pOrgCtx;
13192 if (rc == VINF_EM_DBG_STEPPED)
13193 return VINF_SUCCESS;
13194 return rc;
13195 }
13196# endif
13197
13198 /*
13199 * Compare the register states.
13200 */
13201 unsigned cDiffs = 0;
13202 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13203 {
13204 //Log(("REM and IEM ends up with different registers!\n"));
13205 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13206
13207# define CHECK_FIELD(a_Field) \
13208 do \
13209 { \
13210 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13211 { \
13212 switch (sizeof(pOrgCtx->a_Field)) \
13213 { \
13214 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13215 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13216 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13217 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13218 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13219 } \
13220 cDiffs++; \
13221 } \
13222 } while (0)
13223# define CHECK_XSTATE_FIELD(a_Field) \
13224 do \
13225 { \
13226 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13227 { \
13228 switch (sizeof(pOrgXState->a_Field)) \
13229 { \
13230 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13231 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13232 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13233 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13234 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13235 } \
13236 cDiffs++; \
13237 } \
13238 } while (0)
13239
13240# define CHECK_BIT_FIELD(a_Field) \
13241 do \
13242 { \
13243 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13244 { \
13245 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13246 cDiffs++; \
13247 } \
13248 } while (0)
13249
13250# define CHECK_SEL(a_Sel) \
13251 do \
13252 { \
13253 CHECK_FIELD(a_Sel.Sel); \
13254 CHECK_FIELD(a_Sel.Attr.u); \
13255 CHECK_FIELD(a_Sel.u64Base); \
13256 CHECK_FIELD(a_Sel.u32Limit); \
13257 CHECK_FIELD(a_Sel.fFlags); \
13258 } while (0)
13259
13260 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13261 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13262
13263#if 1 /* The recompiler doesn't update these the intel way. */
13264 if (fRem)
13265 {
13266 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13267 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13268 pOrgXState->x87.CS = pDebugXState->x87.CS;
13269 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13270 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13271 pOrgXState->x87.DS = pDebugXState->x87.DS;
13272 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13273 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13274 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13275 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13276 }
13277#endif
13278 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13279 {
13280 RTAssertMsg2Weak(" the FPU state differs\n");
13281 cDiffs++;
13282 CHECK_XSTATE_FIELD(x87.FCW);
13283 CHECK_XSTATE_FIELD(x87.FSW);
13284 CHECK_XSTATE_FIELD(x87.FTW);
13285 CHECK_XSTATE_FIELD(x87.FOP);
13286 CHECK_XSTATE_FIELD(x87.FPUIP);
13287 CHECK_XSTATE_FIELD(x87.CS);
13288 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13289 CHECK_XSTATE_FIELD(x87.FPUDP);
13290 CHECK_XSTATE_FIELD(x87.DS);
13291 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13292 CHECK_XSTATE_FIELD(x87.MXCSR);
13293 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13294 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13295 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13296 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13297 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13298 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13299 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13300 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13301 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13302 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13303 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13304 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13305 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13306 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13307 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13308 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13309 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13310 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13311 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13312 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13313 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13314 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13315 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13316 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13317 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13318 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13319 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13320 }
13321 CHECK_FIELD(rip);
13322 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13323 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13324 {
13325 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13326 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13327 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13328 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13329 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13330 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13331 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13332 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13333 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13334 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13335 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13336 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13337 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13338 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13339 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13340 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13341        if (0 && !fRem) /** @todo debug the occasional cleared RF flag when running against VT-x. */
13342 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13343 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13344 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13345 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13346 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13347 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13348 }
13349
13350 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13351 CHECK_FIELD(rax);
13352 CHECK_FIELD(rcx);
13353 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13354 CHECK_FIELD(rdx);
13355 CHECK_FIELD(rbx);
13356 CHECK_FIELD(rsp);
13357 CHECK_FIELD(rbp);
13358 CHECK_FIELD(rsi);
13359 CHECK_FIELD(rdi);
13360 CHECK_FIELD(r8);
13361 CHECK_FIELD(r9);
13362 CHECK_FIELD(r10);
13363 CHECK_FIELD(r11);
13364 CHECK_FIELD(r12);
13365 CHECK_FIELD(r13);
13366 CHECK_SEL(cs);
13367 CHECK_SEL(ss);
13368 CHECK_SEL(ds);
13369 CHECK_SEL(es);
13370 CHECK_SEL(fs);
13371 CHECK_SEL(gs);
13372 CHECK_FIELD(cr0);
13373
13374    /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13375       the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13376    /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
13377       while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13378 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13379 {
13380 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13381 { /* ignore */ }
13382 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13383 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13384 && fRem)
13385 { /* ignore */ }
13386 else
13387 CHECK_FIELD(cr2);
13388 }
13389 CHECK_FIELD(cr3);
13390 CHECK_FIELD(cr4);
13391 CHECK_FIELD(dr[0]);
13392 CHECK_FIELD(dr[1]);
13393 CHECK_FIELD(dr[2]);
13394 CHECK_FIELD(dr[3]);
13395 CHECK_FIELD(dr[6]);
13396 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13397 CHECK_FIELD(dr[7]);
13398 CHECK_FIELD(gdtr.cbGdt);
13399 CHECK_FIELD(gdtr.pGdt);
13400 CHECK_FIELD(idtr.cbIdt);
13401 CHECK_FIELD(idtr.pIdt);
13402 CHECK_SEL(ldtr);
13403 CHECK_SEL(tr);
13404 CHECK_FIELD(SysEnter.cs);
13405 CHECK_FIELD(SysEnter.eip);
13406 CHECK_FIELD(SysEnter.esp);
13407 CHECK_FIELD(msrEFER);
13408 CHECK_FIELD(msrSTAR);
13409 CHECK_FIELD(msrPAT);
13410 CHECK_FIELD(msrLSTAR);
13411 CHECK_FIELD(msrCSTAR);
13412 CHECK_FIELD(msrSFMASK);
13413 CHECK_FIELD(msrKERNELGSBASE);
13414
13415 if (cDiffs != 0)
13416 {
13417 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13418 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13419 RTAssertPanic();
13420 static bool volatile s_fEnterDebugger = true;
13421 if (s_fEnterDebugger)
13422 DBGFSTOP(pVM);
13423
13424# if 1 /* Ignore unimplemented instructions for now. */
13425 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13426 rcStrictIem = VINF_SUCCESS;
13427# endif
13428 }
13429# undef CHECK_FIELD
13430# undef CHECK_BIT_FIELD
13431 }
13432
13433 /*
13434 * If the register state compared fine, check the verification event
13435 * records.
13436 */
13437 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13438 {
13439 /*
13440 * Compare verification event records.
13441 * - I/O port accesses should be a 1:1 match.
13442 */
13443 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13444 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13445 while (pIemRec && pOtherRec)
13446 {
13447            /* Since we might miss RAM writes and reads, ignore reads and verify
13448               any extra IEM-only write records as we skip past them. */
13449 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13450 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13451 && pIemRec->pNext)
13452 {
13453 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13454 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13455 pIemRec = pIemRec->pNext;
13456 }
13457
13458 /* Do the compare. */
13459 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13460 {
13461 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13462 break;
13463 }
13464 bool fEquals;
13465 switch (pIemRec->enmEvent)
13466 {
13467 case IEMVERIFYEVENT_IOPORT_READ:
13468 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13469 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13470 break;
13471 case IEMVERIFYEVENT_IOPORT_WRITE:
13472 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13473 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13474 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13475 break;
13476 case IEMVERIFYEVENT_IOPORT_STR_READ:
13477 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13478 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13479 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13480 break;
13481 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13482 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13483 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13484 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13485 break;
13486 case IEMVERIFYEVENT_RAM_READ:
13487 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13488 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13489 break;
13490 case IEMVERIFYEVENT_RAM_WRITE:
13491 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13492 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13493 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13494 break;
13495 default:
13496 fEquals = false;
13497 break;
13498 }
13499 if (!fEquals)
13500 {
13501 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13502 break;
13503 }
13504
13505 /* advance */
13506 pIemRec = pIemRec->pNext;
13507 pOtherRec = pOtherRec->pNext;
13508 }
13509
13510 /* Ignore extra writes and reads. */
13511 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13512 {
13513 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13514 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13515 pIemRec = pIemRec->pNext;
13516 }
13517 if (pIemRec != NULL)
13518 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13519 else if (pOtherRec != NULL)
13520 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13521 }
13522 IEM_GET_CTX(pVCpu) = pOrgCtx;
13523
13524 return rcStrictIem;
13525}
13526
13527#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13528
13529/* stubs */
13530IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13531{
13532 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13533 return VERR_INTERNAL_ERROR;
13534}
13535
13536IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13537{
13538 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13539 return VERR_INTERNAL_ERROR;
13540}
13541
13542#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13543
13544
13545#ifdef LOG_ENABLED
13546/**
13547 * Logs the current instruction.
13548 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13549 * @param pCtx The current CPU context.
13550 * @param fSameCtx Set if we have the same context information as the VMM,
13551 * clear if we may have already executed an instruction in
13552 * our debug context. When clear, we assume IEMCPU holds
13553 * valid CPU mode info.
13554 */
13555IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13556{
13557# ifdef IN_RING3
13558 if (LogIs2Enabled())
13559 {
13560 char szInstr[256];
13561 uint32_t cbInstr = 0;
13562 if (fSameCtx)
13563 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13564 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13565 szInstr, sizeof(szInstr), &cbInstr);
13566 else
13567 {
13568 uint32_t fFlags = 0;
13569 switch (pVCpu->iem.s.enmCpuMode)
13570 {
13571 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13572 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13573 case IEMMODE_16BIT:
13574 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13575 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13576 else
13577 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13578 break;
13579 }
13580 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13581 szInstr, sizeof(szInstr), &cbInstr);
13582 }
13583
13584 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13585 Log2(("****\n"
13586 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13587 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13588 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13589 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13590 " %s\n"
13591 ,
13592 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13593 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13594 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13595 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13596 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13597 szInstr));
13598
13599 if (LogIs3Enabled())
13600 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13601 }
13602 else
13603# endif
13604 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13605 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13606 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13607}
13608#endif
13609
13610
13611/**
13612 * Makes status code adjustments (pass-up from I/O and access handlers)
13613 * as well as maintaining statistics.
13614 *
13615 * @returns Strict VBox status code to pass up.
13616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13617 * @param rcStrict The status from executing an instruction.
13618 */
13619DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13620{
13621 if (rcStrict != VINF_SUCCESS)
13622 {
13623 if (RT_SUCCESS(rcStrict))
13624 {
13625 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13626 || rcStrict == VINF_IOM_R3_IOPORT_READ
13627 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13628 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13629 || rcStrict == VINF_IOM_R3_MMIO_READ
13630 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13631 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13632 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13633 || rcStrict == VINF_CPUM_R3_MSR_READ
13634 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13635 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13636 || rcStrict == VINF_EM_RAW_TO_R3
13637 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13638 /* raw-mode / virt handlers only: */
13639 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13640 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13641 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13642 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13643 || rcStrict == VINF_SELM_SYNC_GDT
13644 || rcStrict == VINF_CSAM_PENDING_ACTION
13645 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13646 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13647/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13648 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13649 if (rcPassUp == VINF_SUCCESS)
13650 pVCpu->iem.s.cRetInfStatuses++;
13651 else if ( rcPassUp < VINF_EM_FIRST
13652 || rcPassUp > VINF_EM_LAST
13653 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13654 {
13655 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13656 pVCpu->iem.s.cRetPassUpStatus++;
13657 rcStrict = rcPassUp;
13658 }
13659 else
13660 {
13661 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13662 pVCpu->iem.s.cRetInfStatuses++;
13663 }
13664 }
13665 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13666 pVCpu->iem.s.cRetAspectNotImplemented++;
13667 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13668 pVCpu->iem.s.cRetInstrNotImplemented++;
13669#ifdef IEM_VERIFICATION_MODE_FULL
13670 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13671 rcStrict = VINF_SUCCESS;
13672#endif
13673 else
13674 pVCpu->iem.s.cRetErrStatuses++;
13675 }
13676 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13677 {
13678 pVCpu->iem.s.cRetPassUpStatus++;
13679 rcStrict = pVCpu->iem.s.rcPassUp;
13680 }
13681
13682 return rcStrict;
13683}
13684
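// Illustrative sketch of the pass-up ranking above (comment only, not compiled).
// A non-zero pVCpu->iem.s.rcPassUp recorded earlier replaces the informational
// return code whenever it lies outside the VINF_EM range or ranks ahead of it
// (i.e. is numerically lower):
//
//     pVCpu->iem.s.rcPassUp = VINF_EM_RAW_TO_R3;           /* hypothetical earlier pass-up */
//     rcStrict = iemExecStatusCodeFiddling(pVCpu, VINF_IOM_R3_IOPORT_WRITE);
//     /* rcStrict now carries the pass-up code and cRetPassUpStatus was bumped,
//        assuming (as in this tree) that the EM scheduling codes rank ahead of
//        the IOM ring-3 ones; otherwise rcStrict wins and cRetInfStatuses grows. */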
13685
13686/**
13687 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13688 * IEMExecOneWithPrefetchedByPC.
13689 *
13690 * Similar code is found in IEMExecLots.
13691 *
13692 * @return Strict VBox status code.
13693 * @param pVCpu The cross context virtual CPU structure of the
13694 * calling EMT.
13695 * @param fExecuteInhibit If set, execute the instruction following CLI,
13696 * POP SS and MOV SS, Gr.
13697 */
13698DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13699{
13700#ifdef IEM_WITH_SETJMP
13701 VBOXSTRICTRC rcStrict;
13702 jmp_buf JmpBuf;
13703 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13704 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13705 if ((rcStrict = setjmp(JmpBuf)) == 0)
13706 {
13707 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13708 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13709 }
13710 else
13711 pVCpu->iem.s.cLongJumps++;
13712 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13713#else
13714 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13715 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13716#endif
13717 if (rcStrict == VINF_SUCCESS)
13718 pVCpu->iem.s.cInstructions++;
13719 if (pVCpu->iem.s.cActiveMappings > 0)
13720 {
13721 Assert(rcStrict != VINF_SUCCESS);
13722 iemMemRollback(pVCpu);
13723 }
13724//#ifdef DEBUG
13725// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13726//#endif
13727
13728 /* Execute the next instruction as well if a cli, pop ss or
13729 mov ss, Gr has just completed successfully. */
13730 if ( fExecuteInhibit
13731 && rcStrict == VINF_SUCCESS
13732 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13733 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13734 {
13735 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13736 if (rcStrict == VINF_SUCCESS)
13737 {
13738#ifdef LOG_ENABLED
13739 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13740#endif
13741#ifdef IEM_WITH_SETJMP
13742 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13743 if ((rcStrict = setjmp(JmpBuf)) == 0)
13744 {
13745 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13746 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13747 }
13748 else
13749 pVCpu->iem.s.cLongJumps++;
13750 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13751#else
13752 IEM_OPCODE_GET_NEXT_U8(&b);
13753 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13754#endif
13755 if (rcStrict == VINF_SUCCESS)
13756 pVCpu->iem.s.cInstructions++;
13757 if (pVCpu->iem.s.cActiveMappings > 0)
13758 {
13759 Assert(rcStrict != VINF_SUCCESS);
13760 iemMemRollback(pVCpu);
13761 }
13762 }
13763 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13764 }
13765
13766 /*
13767 * Return value fiddling, statistics and sanity assertions.
13768 */
13769 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13770
13771 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13772 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13773#if defined(IEM_VERIFICATION_MODE_FULL)
13774 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13776 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13778#endif
13779 return rcStrict;
13780}
13781
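// Illustrative note (comment only): what fExecuteInhibit buys the caller.  When the
// instruction just executed raised the interrupt inhibit (POP SS, MOV SS, Gr and
// friends), a call such as
//
//     VBOXSTRICTRC rcStrict = iemExecOneInner(pVCpu, true /* fExecuteInhibit */);
//
// also decodes and runs the shadowed follow-up instruction before returning and then
// clears the inhibit PC with the dummy 0x7777555533331111 marker, so no interrupt can
// be injected between the two.  With fExecuteInhibit=false only one instruction runs.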
13782
13783#ifdef IN_RC
13784/**
13785 * Re-enters raw-mode or ensures we return to ring-3.
13786 *
13787 * @returns rcStrict, maybe modified.
13788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13789 * @param pCtx The current CPU context.
13790 * @param rcStrict The status code returned by the interpreter.
13791 */
13792DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13793{
13794 if ( !pVCpu->iem.s.fInPatchCode
13795 && ( rcStrict == VINF_SUCCESS
13796 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13797 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13798 {
13799 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13800 CPUMRawEnter(pVCpu);
13801 else
13802 {
13803 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13804 rcStrict = VINF_EM_RESCHEDULE;
13805 }
13806 }
13807 return rcStrict;
13808}
13809#endif
13810
13811
13812/**
13813 * Execute one instruction.
13814 *
13815 * @return Strict VBox status code.
13816 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13817 */
13818VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13819{
13820#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13821 if (++pVCpu->iem.s.cVerifyDepth == 1)
13822 iemExecVerificationModeSetup(pVCpu);
13823#endif
13824#ifdef LOG_ENABLED
13825 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13826 iemLogCurInstr(pVCpu, pCtx, true);
13827#endif
13828
13829 /*
13830 * Do the decoding and emulation.
13831 */
13832 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13833 if (rcStrict == VINF_SUCCESS)
13834 rcStrict = iemExecOneInner(pVCpu, true);
13835
13836#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13837 /*
13838 * Assert some sanity.
13839 */
13840 if (pVCpu->iem.s.cVerifyDepth == 1)
13841 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13842 pVCpu->iem.s.cVerifyDepth--;
13843#endif
13844#ifdef IN_RC
13845 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13846#endif
13847 if (rcStrict != VINF_SUCCESS)
13848 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13849 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13850 return rcStrict;
13851}
13852
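// Illustrative caller sketch (hypothetical, not part of this file): a minimal EM-style
// loop driving the interpreter one instruction at a time and handing any non-success
// (often merely informational) status back to the scheduler.
//
//     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
//     for (unsigned cLeft = 32; cLeft > 0 && rcStrict == VINF_SUCCESS; cLeft--)
//         rcStrict = IEMExecOne(pVCpu);
//     return rcStrict; /* the scheduler decides what to do with e.g. VINF_EM_* codes */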
13853
13854VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13855{
13856 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13857 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13858
13859 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13860 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13861 if (rcStrict == VINF_SUCCESS)
13862 {
13863 rcStrict = iemExecOneInner(pVCpu, true);
13864 if (pcbWritten)
13865 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13866 }
13867
13868#ifdef IN_RC
13869 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13870#endif
13871 return rcStrict;
13872}
13873
13874
13875VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13876 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13877{
13878 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13879 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13880
13881 VBOXSTRICTRC rcStrict;
13882 if ( cbOpcodeBytes
13883 && pCtx->rip == OpcodeBytesPC)
13884 {
13885 iemInitDecoder(pVCpu, false);
13886#ifdef IEM_WITH_CODE_TLB
13887 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13888 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13889 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13890 pVCpu->iem.s.offCurInstrStart = 0;
13891 pVCpu->iem.s.offInstrNextByte = 0;
13892#else
13893 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13894 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13895#endif
13896 rcStrict = VINF_SUCCESS;
13897 }
13898 else
13899 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13900 if (rcStrict == VINF_SUCCESS)
13901 {
13902 rcStrict = iemExecOneInner(pVCpu, true);
13903 }
13904
13905#ifdef IN_RC
13906 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13907#endif
13908 return rcStrict;
13909}
13910
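// Illustrative sketch (hypothetical bytes and caller): feeding opcode bytes that were
// already captured, e.g. by an exit handler, so IEM can skip the fetch.  The bytes are
// only used while the guest RIP still equals OpcodeBytesPC; otherwise IEM prefetches.
//
//     static const uint8_t s_abInstr[] = { 0x0f, 0x01, 0xd9 };  /* example bytes only */
//     VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx),
//                                                          pCtx->rip, s_abInstr,
//                                                          sizeof(s_abInstr));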
13911
13912VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13913{
13914 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13915 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13916
13917 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13918 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13919 if (rcStrict == VINF_SUCCESS)
13920 {
13921 rcStrict = iemExecOneInner(pVCpu, false);
13922 if (pcbWritten)
13923 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13924 }
13925
13926#ifdef IN_RC
13927 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13928#endif
13929 return rcStrict;
13930}
13931
13932
13933VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13934 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13935{
13936 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13937 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13938
13939 VBOXSTRICTRC rcStrict;
13940 if ( cbOpcodeBytes
13941 && pCtx->rip == OpcodeBytesPC)
13942 {
13943 iemInitDecoder(pVCpu, true);
13944#ifdef IEM_WITH_CODE_TLB
13945 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13946 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13947 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13948 pVCpu->iem.s.offCurInstrStart = 0;
13949 pVCpu->iem.s.offInstrNextByte = 0;
13950#else
13951 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13952 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13953#endif
13954 rcStrict = VINF_SUCCESS;
13955 }
13956 else
13957 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13958 if (rcStrict == VINF_SUCCESS)
13959 rcStrict = iemExecOneInner(pVCpu, false);
13960
13961#ifdef IN_RC
13962 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13963#endif
13964 return rcStrict;
13965}
13966
13967
13968/**
13969 * For debugging DISGetParamSize; may come in handy.
13970 *
13971 * @returns Strict VBox status code.
13972 * @param pVCpu The cross context virtual CPU structure of the
13973 * calling EMT.
13974 * @param pCtxCore The context core structure.
13975 * @param OpcodeBytesPC The PC of the opcode bytes.
13976 * @param pvOpcodeBytes Prefetched opcode bytes.
13977 * @param cbOpcodeBytes Number of prefetched bytes.
13978 * @param pcbWritten Where to return the number of bytes written.
13979 * Optional.
13980 */
13981VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13982 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13983 uint32_t *pcbWritten)
13984{
13985 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13986 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13987
13988 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13989 VBOXSTRICTRC rcStrict;
13990 if ( cbOpcodeBytes
13991 && pCtx->rip == OpcodeBytesPC)
13992 {
13993 iemInitDecoder(pVCpu, true);
13994#ifdef IEM_WITH_CODE_TLB
13995 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13996 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13997 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13998 pVCpu->iem.s.offCurInstrStart = 0;
13999 pVCpu->iem.s.offInstrNextByte = 0;
14000#else
14001 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14002 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14003#endif
14004 rcStrict = VINF_SUCCESS;
14005 }
14006 else
14007 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14008 if (rcStrict == VINF_SUCCESS)
14009 {
14010 rcStrict = iemExecOneInner(pVCpu, false);
14011 if (pcbWritten)
14012 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14013 }
14014
14015#ifdef IN_RC
14016 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14017#endif
14018 return rcStrict;
14019}
14020
14021
14022VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14023{
14024 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14025
14026#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14027 /*
14028 * See if there is an interrupt pending in TRPM, inject it if we can.
14029 */
14030 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14031# ifdef IEM_VERIFICATION_MODE_FULL
14032 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14033# endif
14034 if ( pCtx->eflags.Bits.u1IF
14035 && TRPMHasTrap(pVCpu)
14036 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14037 {
14038 uint8_t u8TrapNo;
14039 TRPMEVENT enmType;
14040 RTGCUINT uErrCode;
14041 RTGCPTR uCr2;
14042 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14043 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14044 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14045 TRPMResetTrap(pVCpu);
14046 }
14047
14048 /*
14049 * Log the state.
14050 */
14051# ifdef LOG_ENABLED
14052 iemLogCurInstr(pVCpu, pCtx, true);
14053# endif
14054
14055 /*
14056 * Do the decoding and emulation.
14057 */
14058 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14059 if (rcStrict == VINF_SUCCESS)
14060 rcStrict = iemExecOneInner(pVCpu, true);
14061
14062 /*
14063 * Assert some sanity.
14064 */
14065 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14066
14067 /*
14068 * Log and return.
14069 */
14070 if (rcStrict != VINF_SUCCESS)
14071 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14072 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14073 if (pcInstructions)
14074 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14075 return rcStrict;
14076
14077#else /* Not verification mode */
14078
14079 /*
14080 * See if there is an interrupt pending in TRPM, inject it if we can.
14081 */
14082 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14083# ifdef IEM_VERIFICATION_MODE_FULL
14084 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14085# endif
14086 if ( pCtx->eflags.Bits.u1IF
14087 && TRPMHasTrap(pVCpu)
14088 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14089 {
14090 uint8_t u8TrapNo;
14091 TRPMEVENT enmType;
14092 RTGCUINT uErrCode;
14093 RTGCPTR uCr2;
14094 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14095 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14096 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14097 TRPMResetTrap(pVCpu);
14098 }
14099
14100 /*
14101 * Initial decoder init w/ prefetch, then setup setjmp.
14102 */
14103 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14104 if (rcStrict == VINF_SUCCESS)
14105 {
14106# ifdef IEM_WITH_SETJMP
14107 jmp_buf JmpBuf;
14108 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14109 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14110 pVCpu->iem.s.cActiveMappings = 0;
14111 if ((rcStrict = setjmp(JmpBuf)) == 0)
14112# endif
14113 {
14114 /*
14115 * The run loop. We limit ourselves to 4096 instructions right now.
14116 */
14117 PVM pVM = pVCpu->CTX_SUFF(pVM);
14118 uint32_t cInstr = 4096;
14119 for (;;)
14120 {
14121 /*
14122 * Log the state.
14123 */
14124# ifdef LOG_ENABLED
14125 iemLogCurInstr(pVCpu, pCtx, true);
14126# endif
14127
14128 /*
14129 * Do the decoding and emulation.
14130 */
14131 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14132 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14133 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14134 {
14135 Assert(pVCpu->iem.s.cActiveMappings == 0);
14136 pVCpu->iem.s.cInstructions++;
14137 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14138 {
14139 uint32_t fCpu = pVCpu->fLocalForcedActions
14140 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14141 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14142 | VMCPU_FF_TLB_FLUSH
14143# ifdef VBOX_WITH_RAW_MODE
14144 | VMCPU_FF_TRPM_SYNC_IDT
14145 | VMCPU_FF_SELM_SYNC_TSS
14146 | VMCPU_FF_SELM_SYNC_GDT
14147 | VMCPU_FF_SELM_SYNC_LDT
14148# endif
14149 | VMCPU_FF_INHIBIT_INTERRUPTS
14150 | VMCPU_FF_BLOCK_NMIS ));
14151
14152 if (RT_LIKELY( ( !fCpu
14153 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14154 && !pCtx->rflags.Bits.u1IF) )
14155 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14156 {
14157 if (cInstr-- > 0)
14158 {
14159 Assert(pVCpu->iem.s.cActiveMappings == 0);
14160 iemReInitDecoder(pVCpu);
14161 continue;
14162 }
14163 }
14164 }
14165 Assert(pVCpu->iem.s.cActiveMappings == 0);
14166 }
14167 else if (pVCpu->iem.s.cActiveMappings > 0)
14168 iemMemRollback(pVCpu);
14169 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14170 break;
14171 }
14172 }
14173# ifdef IEM_WITH_SETJMP
14174 else
14175 {
14176 if (pVCpu->iem.s.cActiveMappings > 0)
14177 iemMemRollback(pVCpu);
14178 pVCpu->iem.s.cLongJumps++;
14179 }
14180 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14181# endif
14182
14183 /*
14184 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14185 */
14186 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14187 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14188# if defined(IEM_VERIFICATION_MODE_FULL)
14189 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14191 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14192 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14193# endif
14194 }
14195
14196 /*
14197 * Maybe re-enter raw-mode and log.
14198 */
14199# ifdef IN_RC
14200 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14201# endif
14202 if (rcStrict != VINF_SUCCESS)
14203 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14204 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14205 if (pcInstructions)
14206 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14207 return rcStrict;
14208#endif /* Not verification mode */
14209}
14210
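// Illustrative sketch (hypothetical caller): running a burst of instructions and
// reporting how many actually completed, e.g. for exit-rate heuristics.
//
//     uint32_t     cInstructions = 0;
//     VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
//     LogFlow(("Executed %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));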
14211
14212
14213/**
14214 * Injects a trap, fault, abort, software interrupt or external interrupt.
14215 *
14216 * The parameter list matches TRPMQueryTrapAll pretty closely.
14217 *
14218 * @returns Strict VBox status code.
14219 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14220 * @param u8TrapNo The trap number.
14221 * @param enmType What type is it (trap/fault/abort), software
14222 * interrupt or hardware interrupt.
14223 * @param uErrCode The error code if applicable.
14224 * @param uCr2 The CR2 value if applicable.
14225 * @param cbInstr The instruction length (only relevant for
14226 * software interrupts).
14227 */
14228VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14229 uint8_t cbInstr)
14230{
14231 iemInitDecoder(pVCpu, false);
14232#ifdef DBGFTRACE_ENABLED
14233 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14234 u8TrapNo, enmType, uErrCode, uCr2);
14235#endif
14236
14237 uint32_t fFlags;
14238 switch (enmType)
14239 {
14240 case TRPM_HARDWARE_INT:
14241 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14242 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14243 uErrCode = uCr2 = 0;
14244 break;
14245
14246 case TRPM_SOFTWARE_INT:
14247 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14248 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14249 uErrCode = uCr2 = 0;
14250 break;
14251
14252 case TRPM_TRAP:
14253 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14254 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14255 if (u8TrapNo == X86_XCPT_PF)
14256 fFlags |= IEM_XCPT_FLAGS_CR2;
14257 switch (u8TrapNo)
14258 {
14259 case X86_XCPT_DF:
14260 case X86_XCPT_TS:
14261 case X86_XCPT_NP:
14262 case X86_XCPT_SS:
14263 case X86_XCPT_PF:
14264 case X86_XCPT_AC:
14265 fFlags |= IEM_XCPT_FLAGS_ERR;
14266 break;
14267
14268 case X86_XCPT_NMI:
14269 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14270 break;
14271 }
14272 break;
14273
14274 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14275 }
14276
14277 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14278}
14279
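// Illustrative sketch (hypothetical vector): injecting an external hardware interrupt
// straight through IEM.  For TRPM_HARDWARE_INT the error code, CR2 and instruction
// length arguments are ignored, as the switch above shows.
//
//     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20 /* u8TrapNo */, TRPM_HARDWARE_INT,
//                                           0 /* uErrCode */, 0 /* uCr2 */, 0 /* cbInstr */);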
14280
14281/**
14282 * Injects the active TRPM event.
14283 *
14284 * @returns Strict VBox status code.
14285 * @param pVCpu The cross context virtual CPU structure.
14286 */
14287VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14288{
14289#ifndef IEM_IMPLEMENTS_TASKSWITCH
14290 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14291#else
14292 uint8_t u8TrapNo;
14293 TRPMEVENT enmType;
14294 RTGCUINT uErrCode;
14295 RTGCUINTPTR uCr2;
14296 uint8_t cbInstr;
14297 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14298 if (RT_FAILURE(rc))
14299 return rc;
14300
14301 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14302
14303 /** @todo Are there any other codes that imply the event was successfully
14304 * delivered to the guest? See @bugref{6607}. */
14305 if ( rcStrict == VINF_SUCCESS
14306 || rcStrict == VINF_IEM_RAISED_XCPT)
14307 {
14308 TRPMResetTrap(pVCpu);
14309 }
14310 return rcStrict;
14311#endif
14312}
14313
14314
14315VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14316{
14317 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14318 return VERR_NOT_IMPLEMENTED;
14319}
14320
14321
14322VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14323{
14324 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14325 return VERR_NOT_IMPLEMENTED;
14326}
14327
14328
14329#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14330/**
14331 * Executes a IRET instruction with default operand size.
14332 *
14333 * This is for PATM.
14334 *
14335 * @returns VBox status code.
14336 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14337 * @param pCtxCore The register frame.
14338 */
14339VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14340{
14341 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14342
14343 iemCtxCoreToCtx(pCtx, pCtxCore);
14344 iemInitDecoder(pVCpu);
14345 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14346 if (rcStrict == VINF_SUCCESS)
14347 iemCtxToCtxCore(pCtxCore, pCtx);
14348 else
14349 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14350 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14351 return rcStrict;
14352}
14353#endif
14354
14355
14356/**
14357 * Macro used by the IEMExec* methods to check the given instruction length.
14358 *
14359 * Will return on failure!
14360 *
14361 * @param a_cbInstr The given instruction length.
14362 * @param a_cbMin The minimum length.
14363 */
14364#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14365 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14366 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14367
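// Worked example for the range check above (values illustrative): with a_cbMin=2 the
// expression becomes (unsigned)cbInstr - 2 <= 13, which accepts 2..15 and rejects
// everything else because smaller values wrap around to huge unsigned numbers:
//
//     cbInstr = 1   ->  (unsigned)1 - 2 = 0xFFFFFFFF  >  13  ->  assert + return
//     cbInstr = 2   ->  0                            <=  13  ->  ok
//     cbInstr = 15  ->  13                           <=  13  ->  ok
//     cbInstr = 16  ->  14                            >  13  ->  assert + return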
14368
14369/**
14370 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14371 *
14372 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14373 *
14374 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14376 * @param rcStrict The status code to fiddle.
14377 */
14378DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14379{
14380 iemUninitExec(pVCpu);
14381#ifdef IN_RC
14382 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14383 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14384#else
14385 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14386#endif
14387}
14388
14389
14390/**
14391 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14392 *
14393 * This API ASSUMES that the caller has already verified that the guest code is
14394 * allowed to access the I/O port. (The I/O port is in the DX register in the
14395 * guest state.)
14396 *
14397 * @returns Strict VBox status code.
14398 * @param pVCpu The cross context virtual CPU structure.
14399 * @param cbValue The size of the I/O port access (1, 2, or 4).
14400 * @param enmAddrMode The addressing mode.
14401 * @param fRepPrefix Indicates whether a repeat prefix is used
14402 * (doesn't matter which for this instruction).
14403 * @param cbInstr The instruction length in bytes.
14404 * @param iEffSeg The effective segment register number.
14405 * @param fIoChecked Whether the access to the I/O port has been
14406 * checked or not. It's typically checked in the
14407 * HM scenario.
14408 */
14409VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14410 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14411{
14412 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14413 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14414
14415 /*
14416 * State init.
14417 */
14418 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14419
14420 /*
14421 * Switch orgy for getting to the right handler.
14422 */
14423 VBOXSTRICTRC rcStrict;
14424 if (fRepPrefix)
14425 {
14426 switch (enmAddrMode)
14427 {
14428 case IEMMODE_16BIT:
14429 switch (cbValue)
14430 {
14431 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14432 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14433 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14434 default:
14435 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14436 }
14437 break;
14438
14439 case IEMMODE_32BIT:
14440 switch (cbValue)
14441 {
14442 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14443 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14444 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14445 default:
14446 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14447 }
14448 break;
14449
14450 case IEMMODE_64BIT:
14451 switch (cbValue)
14452 {
14453 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14454 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14455 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14456 default:
14457 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14458 }
14459 break;
14460
14461 default:
14462 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14463 }
14464 }
14465 else
14466 {
14467 switch (enmAddrMode)
14468 {
14469 case IEMMODE_16BIT:
14470 switch (cbValue)
14471 {
14472 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14473 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14474 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14475 default:
14476 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14477 }
14478 break;
14479
14480 case IEMMODE_32BIT:
14481 switch (cbValue)
14482 {
14483 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14484 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14485 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14486 default:
14487 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14488 }
14489 break;
14490
14491 case IEMMODE_64BIT:
14492 switch (cbValue)
14493 {
14494 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14495 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14496 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14497 default:
14498 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14499 }
14500 break;
14501
14502 default:
14503 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14504 }
14505 }
14506
14507 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14508}
14509
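// Illustrative sketch (hypothetical exit handler): forwarding a decoded "rep outsb"
// (two bytes, assuming no extra prefixes) to IEM after the I/O port has already been
// permission checked, hence fIoChecked=true; the remaining arguments come from the
// exit information.
//
//     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /* cbValue */, IEMMODE_64BIT,
//                                                  true /* fRepPrefix */, 2 /* cbInstr */,
//                                                  X86_SREG_DS /* iEffSeg */,
//                                                  true /* fIoChecked */);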
14510
14511/**
14512 * Interface for HM and EM for executing string I/O IN (read) instructions.
14513 *
14514 * This API ASSUMES that the caller has already verified that the guest code is
14515 * allowed to access the I/O port. (The I/O port is in the DX register in the
14516 * guest state.)
14517 *
14518 * @returns Strict VBox status code.
14519 * @param pVCpu The cross context virtual CPU structure.
14520 * @param cbValue The size of the I/O port access (1, 2, or 4).
14521 * @param enmAddrMode The addressing mode.
14522 * @param fRepPrefix Indicates whether a repeat prefix is used
14523 * (doesn't matter which for this instruction).
14524 * @param cbInstr The instruction length in bytes.
14525 * @param fIoChecked Whether the access to the I/O port has been
14526 * checked or not. It's typically checked in the
14527 * HM scenario.
14528 */
14529VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14530 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14531{
14532 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14533
14534 /*
14535 * State init.
14536 */
14537 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14538
14539 /*
14540 * Switch orgy for getting to the right handler.
14541 */
14542 VBOXSTRICTRC rcStrict;
14543 if (fRepPrefix)
14544 {
14545 switch (enmAddrMode)
14546 {
14547 case IEMMODE_16BIT:
14548 switch (cbValue)
14549 {
14550 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14551 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14552 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14553 default:
14554 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14555 }
14556 break;
14557
14558 case IEMMODE_32BIT:
14559 switch (cbValue)
14560 {
14561 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14562 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14563 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14564 default:
14565 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14566 }
14567 break;
14568
14569 case IEMMODE_64BIT:
14570 switch (cbValue)
14571 {
14572 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14573 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14574 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14575 default:
14576 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14577 }
14578 break;
14579
14580 default:
14581 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14582 }
14583 }
14584 else
14585 {
14586 switch (enmAddrMode)
14587 {
14588 case IEMMODE_16BIT:
14589 switch (cbValue)
14590 {
14591 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14592 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14593 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14594 default:
14595 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14596 }
14597 break;
14598
14599 case IEMMODE_32BIT:
14600 switch (cbValue)
14601 {
14602 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14603 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14604 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14605 default:
14606 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14607 }
14608 break;
14609
14610 case IEMMODE_64BIT:
14611 switch (cbValue)
14612 {
14613 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14614 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14615 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14616 default:
14617 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14618 }
14619 break;
14620
14621 default:
14622 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14623 }
14624 }
14625
14626 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14627}
14628
14629
14630/**
14631 * Interface for rawmode to execute an OUT (write) instruction.
14632 *
14633 * @returns Strict VBox status code.
14634 * @param pVCpu The cross context virtual CPU structure.
14635 * @param cbInstr The instruction length in bytes.
14636 * @param u16Port The port to write to.
14637 * @param cbReg The register size.
14638 *
14639 * @remarks In ring-0 not all of the state needs to be synced in.
14640 */
14641VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14642{
14643 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14644 Assert(cbReg <= 4 && cbReg != 3);
14645
14646 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14647 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14648 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14649}
14650
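// Illustrative sketch (hypothetical values): raw-mode forwarding an already decoded
// one-byte "out dx, al" to IEM, with the port value taken from DX at decode time.
//
//     VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /* cbInstr */,
//                                               0x80 /* u16Port, hypothetical */,
//                                               1 /* cbReg */);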
14651
14652/**
14653 * Interface for rawmode to execute an IN (read) instruction.
14654 *
14655 * @returns Strict VBox status code.
14656 * @param pVCpu The cross context virtual CPU structure.
14657 * @param cbInstr The instruction length in bytes.
14658 * @param u16Port The port to read.
14659 * @param cbReg The register size.
14660 */
14661VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14662{
14663 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14664 Assert(cbReg <= 4 && cbReg != 3);
14665
14666 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14667 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14668 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14669}
14670
14671
14672/**
14673 * Interface for HM and EM to write to a CRx register.
14674 *
14675 * @returns Strict VBox status code.
14676 * @param pVCpu The cross context virtual CPU structure.
14677 * @param cbInstr The instruction length in bytes.
14678 * @param iCrReg The control register number (destination).
14679 * @param iGReg The general purpose register number (source).
14680 *
14681 * @remarks In ring-0 not all of the state needs to be synced in.
14682 */
14683VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14684{
14685 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14686 Assert(iCrReg < 16);
14687 Assert(iGReg < 16);
14688
14689 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14690 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14691 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14692}
14693
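// Illustrative sketch (hypothetical exit): HM forwarding a decoded "mov cr3, rax"
// (0f 22 d8, three bytes) after a CR3 write intercept.
//
//     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /* cbInstr */,
//                                                       3 /* iCrReg: CR3 */,
//                                                       X86_GREG_xAX /* iGReg */);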
14694
14695/**
14696 * Interface for HM and EM to read from a CRx register.
14697 *
14698 * @returns Strict VBox status code.
14699 * @param pVCpu The cross context virtual CPU structure.
14700 * @param cbInstr The instruction length in bytes.
14701 * @param iGReg The general purpose register number (destination).
14702 * @param iCrReg The control register number (source).
14703 *
14704 * @remarks In ring-0 not all of the state needs to be synced in.
14705 */
14706VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14707{
14708 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14709 Assert(iCrReg < 16);
14710 Assert(iGReg < 16);
14711
14712 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14713 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14714 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14715}
14716
14717
14718/**
14719 * Interface for HM and EM to clear the CR0[TS] bit.
14720 *
14721 * @returns Strict VBox status code.
14722 * @param pVCpu The cross context virtual CPU structure.
14723 * @param cbInstr The instruction length in bytes.
14724 *
14725 * @remarks In ring-0 not all of the state needs to be synced in.
14726 */
14727VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14728{
14729 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14730
14731 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14732 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14733 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14734}
14735
14736
14737/**
14738 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14739 *
14740 * @returns Strict VBox status code.
14741 * @param pVCpu The cross context virtual CPU structure.
14742 * @param cbInstr The instruction length in bytes.
14743 * @param uValue The value to load into CR0.
14744 *
14745 * @remarks In ring-0 not all of the state needs to be synced in.
14746 */
14747VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14748{
14749 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14750
14751 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14752 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14753 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14754}
14755
14756
14757/**
14758 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14759 *
14760 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14761 *
14762 * @returns Strict VBox status code.
14763 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14764 * @param cbInstr The instruction length in bytes.
14765 * @remarks In ring-0 not all of the state needs to be synced in.
14766 * @thread EMT(pVCpu)
14767 */
14768VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14769{
14770 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14771
14772 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14773 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14774 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14775}
14776
14777#ifdef IN_RING3
14778
14779/**
14780 * Handles the unlikely and probably fatal merge cases.
14781 *
14782 * @returns Merged status code.
14783 * @param rcStrict Current EM status code.
14784 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14785 * with @a rcStrict.
14786 * @param iMemMap The memory mapping index. For error reporting only.
14787 * @param pVCpu The cross context virtual CPU structure of the calling
14788 * thread, for error reporting only.
14789 */
14790DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14791 unsigned iMemMap, PVMCPU pVCpu)
14792{
14793 if (RT_FAILURE_NP(rcStrict))
14794 return rcStrict;
14795
14796 if (RT_FAILURE_NP(rcStrictCommit))
14797 return rcStrictCommit;
14798
14799 if (rcStrict == rcStrictCommit)
14800 return rcStrictCommit;
14801
14802 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14803 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14804 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14805 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14807 return VERR_IOM_FF_STATUS_IPE;
14808}
14809
14810
14811/**
14812 * Helper for IOMR3ProcessForceFlag.
14813 *
14814 * @returns Merged status code.
14815 * @param rcStrict Current EM status code.
14816 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14817 * with @a rcStrict.
14818 * @param iMemMap The memory mapping index. For error reporting only.
14819 * @param pVCpu The cross context virtual CPU structure of the calling
14820 * thread, for error reporting only.
14821 */
14822DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14823{
14824 /* Simple. */
14825 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14826 return rcStrictCommit;
14827
14828 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14829 return rcStrict;
14830
14831 /* EM scheduling status codes. */
14832 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14833 && rcStrict <= VINF_EM_LAST))
14834 {
14835 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14836 && rcStrictCommit <= VINF_EM_LAST))
14837 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14838 }
14839
14840 /* Unlikely */
14841 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14842}
14843
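// Illustrative examples of the merge rules above:
//
//     iemR3MergeStatus(VINF_SUCCESS, rcStrictCommit, iMemMap, pVCpu)  ->  rcStrictCommit
//     iemR3MergeStatus(rcStrict,     VINF_SUCCESS,   iMemMap, pVCpu)  ->  rcStrict
//     iemR3MergeStatus(VINF_EM_HALT, VINF_EM_RESET,  iMemMap, pVCpu)  ->  the numerically
//                                                     lower (more urgent) of the two codes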
14844
14845/**
14846 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14847 *
14848 * @returns Merge between @a rcStrict and what the commit operation returned.
14849 * @param pVM The cross context VM structure.
14850 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14851 * @param rcStrict The status code returned by ring-0 or raw-mode.
14852 */
14853VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14854{
14855 /*
14856 * Reset the pending commit.
14857 */
14858 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14859 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14860 ("%#x %#x %#x\n",
14861 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14862 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14863
14864 /*
14865 * Commit the pending bounce buffers (usually just one).
14866 */
14867 unsigned cBufs = 0;
14868 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14869 while (iMemMap-- > 0)
14870 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14871 {
14872 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14873 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14874 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14875
14876 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14877 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14878 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14879
14880 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14881 {
14882 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14884 pbBuf,
14885 cbFirst,
14886 PGMACCESSORIGIN_IEM);
14887 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14888 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14889 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14890 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14891 }
14892
14893 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14894 {
14895 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14896 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14897 pbBuf + cbFirst,
14898 cbSecond,
14899 PGMACCESSORIGIN_IEM);
14900 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14901 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14902 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14903 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14904 }
14905 cBufs++;
14906 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14907 }
14908
14909 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14910 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14911 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14912 pVCpu->iem.s.cActiveMappings = 0;
14913 return rcStrict;
14914}
14915
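// Illustrative sketch (hypothetical ring-3 caller): how the force-flag is typically
// consumed after coming back from ring-0 / raw-mode with a pending bounce buffer
// commit.
//
//     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
//         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);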
14916#endif /* IN_RING3 */
14917