VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@65644

Last change on this file since 65644 was 65631, checked in by vboxsync, 8 years ago

IEM: When loading a 16-bit stack from TSS, consistently force a 16-bit stack pointer.

1/* $Id: IEMAll.cpp 65631 2017-02-06 17:38:05Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
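/* Illustrative sketch (not part of the original source): what the level split above
   typically looks like at a call site.  Log/Log2/Log4/LogFlow are the standard VBox
   logging macros; the values logged here (uCs, uNewRip) are made-up placeholders.

        LogFlow(("IEMExecOne: enter\n"));                             // Flow: enter/exit state info.
        Log4(("decode - %04x:%08RX64 mov eax, ebx\n", uCs, uNewRip)); // Level 4: mnemonic w/ EIP.
        Log(("iemRaiseXcptOrInt: raising #GP(0)\n"));                 // Level 1: exceptions and other major events.
*/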
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#ifdef IEM_VERIFICATION_MODE_FULL
115# include <VBox/vmm/rem.h>
116# include <VBox/vmm/mm.h>
117#endif
118#include <VBox/vmm/vm.h>
119#include <VBox/log.h>
120#include <VBox/err.h>
121#include <VBox/param.h>
122#include <VBox/dis.h>
123#include <VBox/disopcode.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215
216/*********************************************************************************************************************************
217* Defined Constants And Macros *
218*********************************************************************************************************************************/
219/** @def IEM_WITH_SETJMP
220 * Enables alternative status code handling using setjmps.
221 *
222 * This adds a bit of expense via the setjmp() call since it saves all the
223 * non-volatile registers. However, it eliminates return code checks and allows
224 * for more optimal return value passing (return regs instead of stack buffer).
225 */
226#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
227# define IEM_WITH_SETJMP
228#endif
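/* Illustrative sketch (not from the original source) of the two status handling styles
   IEM_WITH_SETJMP selects between; the helper names below mirror the opcode fetch
   helpers used elsewhere in IEM, but the fragments are only assumptions.

        // Status code style: every helper returns a VBOXSTRICTRC the caller must check.
        uint16_t     u16Imm;
        VBOXSTRICTRC rcStrict = iemOpcodeGetNextU16(pVCpu, &u16Imm);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        // Setjmp style: the helper returns the value directly and longjmps back to the
        // setjmp frame established by the instruction dispatcher on any failure.
        uint16_t u16Imm = iemOpcodeGetNextU16Jmp(pVCpu);
*/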
229
230/** Temporary hack to disable the double execution. Will be removed in favor
231 * of a dedicated execution mode in EM. */
232//#define IEM_VERIFICATION_MODE_NO_REM
233
234/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
235 * due to GCC lacking knowledge about the value range of a switch. */
236#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
237
238/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
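/* Illustrative sketch (not from the original source): typical use of the macro above in
   an operand-size switch, where GCC cannot see that all values are covered; the helper
   names are hypothetical.

        switch (pVCpu->iem.s.enmEffOpSize)
        {
            case IEMMODE_16BIT: return iemOpHlpExample16(pVCpu);
            case IEMMODE_32BIT: return iemOpHlpExample32(pVCpu);
            case IEMMODE_64BIT: return iemOpHlpExample64(pVCpu);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
*/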
240
241/**
242 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
243 * occasion.
244 */
245#ifdef LOG_ENABLED
246# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
247 do { \
248 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
249 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
250 } while (0)
251#else
252# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
254#endif
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion using the supplied logger statement.
259 *
260 * @param a_LoggerArgs What to log on failure.
261 */
262#ifdef LOG_ENABLED
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 do { \
265 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
266 /*LogFunc(a_LoggerArgs);*/ \
267 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
268 } while (0)
269#else
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
272#endif
273
274/**
275 * Call an opcode decoder function.
276 *
277 * We're using macros for this so that adding and removing parameters can be
278 * done as we please. See FNIEMOP_DEF.
279 */
280#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
281
282/**
283 * Call a common opcode decoder function taking one extra argument.
284 *
285 * We're using macros for this so that adding and removing parameters can be
286 * done as we please. See FNIEMOP_DEF_1.
287 */
288#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
289
290/**
291 * Call a common opcode decoder function taking two extra arguments.
292 *
293 * We're using macros for this so that adding and removing parameters can be
294 * done as we please. See FNIEMOP_DEF_2.
295 */
296#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
297
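/* Illustrative sketch (assumption, not from this file): how the FNIEMOP_DEF and
   FNIEMOP_CALL macro families pair up so the hidden parameter list can be changed in
   one place.  The handler name and the IEM_OPCODE_GET_NEXT_U8 fetch are assumptions
   about how callers elsewhere in IEM look.

        FNIEMOPRM_DEF(iemOp_ExampleGroup)       // expands to FNIEMOP_DEF_1(..., uint8_t, bRm)
        {
            RT_NOREF(bRm);
            return VINF_SUCCESS;
        }

        // ... and at the call site the matching CALL macro supplies pVCpu implicitly:
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
        return FNIEMOP_CALL_1(iemOp_ExampleGroup, bRm);
*/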
298/**
299 * Check if we're currently executing in real or virtual 8086 mode.
300 *
301 * @returns @c true if it is, @c false if not.
302 * @param a_pVCpu The IEM state of the current CPU.
303 */
304#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
305
306/**
307 * Check if we're currently executing in virtual 8086 mode.
308 *
309 * @returns @c true if it is, @c false if not.
310 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
311 */
312#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
313
314/**
315 * Check if we're currently executing in long mode.
316 *
317 * @returns @c true if it is, @c false if not.
318 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
319 */
320#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
321
322/**
323 * Check if we're currently executing in real mode.
324 *
325 * @returns @c true if it is, @c false if not.
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
336
337/**
338 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
339 * @returns PCCPUMFEATURES
340 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
341 */
342#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
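/* Illustrative sketch (assumption, not from this file): the feature accessors above are
   typically used to decide whether an instruction should raise #UD for the configured
   guest CPU profile.  The fSse2 member and the IEMOP_RAISE_INVALID_OPCODE macro are
   assumed names.

        if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2)
            return IEMOP_RAISE_INVALID_OPCODE();
*/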
343
344/**
345 * Evaluates to true if we're presenting an Intel CPU to the guest.
346 */
347#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
348
349/**
350 * Evaluates to true if we're presenting an AMD CPU to the guest.
351 */
352#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
353
354/**
355 * Check if the address is canonical.
356 */
357#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
358
359/** @def IEM_USE_UNALIGNED_DATA_ACCESS
360 * Use unaligned accesses instead of elaborate byte assembly. */
361#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
362# define IEM_USE_UNALIGNED_DATA_ACCESS
363#endif
364
365
366/*********************************************************************************************************************************
367* Global Variables *
368*********************************************************************************************************************************/
369extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
370
371
372/** Function table for the ADD instruction. */
373IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
374{
375 iemAImpl_add_u8, iemAImpl_add_u8_locked,
376 iemAImpl_add_u16, iemAImpl_add_u16_locked,
377 iemAImpl_add_u32, iemAImpl_add_u32_locked,
378 iemAImpl_add_u64, iemAImpl_add_u64_locked
379};
380
381/** Function table for the ADC instruction. */
382IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
383{
384 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
385 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
386 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
387 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
388};
389
390/** Function table for the SUB instruction. */
391IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
392{
393 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
394 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
395 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
396 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
397};
398
399/** Function table for the SBB instruction. */
400IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
401{
402 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
403 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
404 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
405 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
406};
407
408/** Function table for the OR instruction. */
409IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
410{
411 iemAImpl_or_u8, iemAImpl_or_u8_locked,
412 iemAImpl_or_u16, iemAImpl_or_u16_locked,
413 iemAImpl_or_u32, iemAImpl_or_u32_locked,
414 iemAImpl_or_u64, iemAImpl_or_u64_locked
415};
416
417/** Function table for the XOR instruction. */
418IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
419{
420 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
421 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
422 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
423 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
424};
425
426/** Function table for the AND instruction. */
427IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
428{
429 iemAImpl_and_u8, iemAImpl_and_u8_locked,
430 iemAImpl_and_u16, iemAImpl_and_u16_locked,
431 iemAImpl_and_u32, iemAImpl_and_u32_locked,
432 iemAImpl_and_u64, iemAImpl_and_u64_locked
433};
434
435/** Function table for the CMP instruction.
436 * @remarks Making operand order ASSUMPTIONS.
437 */
438IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
439{
440 iemAImpl_cmp_u8, NULL,
441 iemAImpl_cmp_u16, NULL,
442 iemAImpl_cmp_u32, NULL,
443 iemAImpl_cmp_u64, NULL
444};
445
446/** Function table for the TEST instruction.
447 * @remarks Making operand order ASSUMPTIONS.
448 */
449IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
450{
451 iemAImpl_test_u8, NULL,
452 iemAImpl_test_u16, NULL,
453 iemAImpl_test_u32, NULL,
454 iemAImpl_test_u64, NULL
455};
456
457/** Function table for the BT instruction. */
458IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
459{
460 NULL, NULL,
461 iemAImpl_bt_u16, NULL,
462 iemAImpl_bt_u32, NULL,
463 iemAImpl_bt_u64, NULL
464};
465
466/** Function table for the BTC instruction. */
467IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
468{
469 NULL, NULL,
470 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
471 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
472 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
473};
474
475/** Function table for the BTR instruction. */
476IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
477{
478 NULL, NULL,
479 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
480 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
481 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
482};
483
484/** Function table for the BTS instruction. */
485IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
486{
487 NULL, NULL,
488 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
489 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
490 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
491};
492
493/** Function table for the BSF instruction. */
494IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
495{
496 NULL, NULL,
497 iemAImpl_bsf_u16, NULL,
498 iemAImpl_bsf_u32, NULL,
499 iemAImpl_bsf_u64, NULL
500};
501
502/** Function table for the BSR instruction. */
503IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
504{
505 NULL, NULL,
506 iemAImpl_bsr_u16, NULL,
507 iemAImpl_bsr_u32, NULL,
508 iemAImpl_bsr_u64, NULL
509};
510
511/** Function table for the IMUL instruction. */
512IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
513{
514 NULL, NULL,
515 iemAImpl_imul_two_u16, NULL,
516 iemAImpl_imul_two_u32, NULL,
517 iemAImpl_imul_two_u64, NULL
518};
519
520/** Group 1 /r lookup table. */
521IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
522{
523 &g_iemAImpl_add,
524 &g_iemAImpl_or,
525 &g_iemAImpl_adc,
526 &g_iemAImpl_sbb,
527 &g_iemAImpl_and,
528 &g_iemAImpl_sub,
529 &g_iemAImpl_xor,
530 &g_iemAImpl_cmp
531};
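/* Illustrative sketch (not from this file): the group 1 table above is indexed by the
   reg field of the ModR/M byte, selecting ADD/OR/ADC/SBB/AND/SUB/XOR/CMP; the masking
   idiom below is an assumption about how decoders elsewhere use it.

        PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
*/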
532
533/** Function table for the INC instruction. */
534IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
535{
536 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
537 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
538 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
539 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
540};
541
542/** Function table for the DEC instruction. */
543IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
544{
545 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
546 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
547 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
548 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
549};
550
551/** Function table for the NEG instruction. */
552IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
553{
554 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
555 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
556 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
557 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
558};
559
560/** Function table for the NOT instruction. */
561IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
562{
563 iemAImpl_not_u8, iemAImpl_not_u8_locked,
564 iemAImpl_not_u16, iemAImpl_not_u16_locked,
565 iemAImpl_not_u32, iemAImpl_not_u32_locked,
566 iemAImpl_not_u64, iemAImpl_not_u64_locked
567};
568
569
570/** Function table for the ROL instruction. */
571IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
572{
573 iemAImpl_rol_u8,
574 iemAImpl_rol_u16,
575 iemAImpl_rol_u32,
576 iemAImpl_rol_u64
577};
578
579/** Function table for the ROR instruction. */
580IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
581{
582 iemAImpl_ror_u8,
583 iemAImpl_ror_u16,
584 iemAImpl_ror_u32,
585 iemAImpl_ror_u64
586};
587
588/** Function table for the RCL instruction. */
589IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
590{
591 iemAImpl_rcl_u8,
592 iemAImpl_rcl_u16,
593 iemAImpl_rcl_u32,
594 iemAImpl_rcl_u64
595};
596
597/** Function table for the RCR instruction. */
598IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
599{
600 iemAImpl_rcr_u8,
601 iemAImpl_rcr_u16,
602 iemAImpl_rcr_u32,
603 iemAImpl_rcr_u64
604};
605
606/** Function table for the SHL instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
608{
609 iemAImpl_shl_u8,
610 iemAImpl_shl_u16,
611 iemAImpl_shl_u32,
612 iemAImpl_shl_u64
613};
614
615/** Function table for the SHR instruction. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
617{
618 iemAImpl_shr_u8,
619 iemAImpl_shr_u16,
620 iemAImpl_shr_u32,
621 iemAImpl_shr_u64
622};
623
624/** Function table for the SAR instruction. */
625IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
626{
627 iemAImpl_sar_u8,
628 iemAImpl_sar_u16,
629 iemAImpl_sar_u32,
630 iemAImpl_sar_u64
631};
632
633
634/** Function table for the MUL instruction. */
635IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
636{
637 iemAImpl_mul_u8,
638 iemAImpl_mul_u16,
639 iemAImpl_mul_u32,
640 iemAImpl_mul_u64
641};
642
643/** Function table for the IMUL instruction working implicitly on rAX. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
645{
646 iemAImpl_imul_u8,
647 iemAImpl_imul_u16,
648 iemAImpl_imul_u32,
649 iemAImpl_imul_u64
650};
651
652/** Function table for the DIV instruction. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
654{
655 iemAImpl_div_u8,
656 iemAImpl_div_u16,
657 iemAImpl_div_u32,
658 iemAImpl_div_u64
659};
660
661/** Function table for the IDIV instruction. */
662IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
663{
664 iemAImpl_idiv_u8,
665 iemAImpl_idiv_u16,
666 iemAImpl_idiv_u32,
667 iemAImpl_idiv_u64
668};
669
670/** Function table for the SHLD instruction */
671IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
672{
673 iemAImpl_shld_u16,
674 iemAImpl_shld_u32,
675 iemAImpl_shld_u64,
676};
677
678/** Function table for the SHRD instruction */
679IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
680{
681 iemAImpl_shrd_u16,
682 iemAImpl_shrd_u32,
683 iemAImpl_shrd_u64,
684};
685
686
687/** Function table for the PUNPCKLBW instruction */
688IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
689/** Function table for the PUNPCKLWD instruction */
690IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
691/** Function table for the PUNPCKLDQ instruction */
692IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
693/** Function table for the PUNPCKLQDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
695
696/** Function table for the PUNPCKHBW instruction */
697IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
698/** Function table for the PUNPCKHWD instruction */
699IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
700/** Function table for the PUNPCKHDQ instruction */
701IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
702/** Function table for the PUNPCKHQDQ instruction */
703IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
704
705/** Function table for the PXOR instruction */
706IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
707/** Function table for the PCMPEQB instruction */
708IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
709/** Function table for the PCMPEQW instruction */
710IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
711/** Function table for the PCMPEQD instruction */
712IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
713
714
715#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
716/** What IEM just wrote. */
717uint8_t g_abIemWrote[256];
718/** How much IEM just wrote. */
719size_t g_cbIemWrote;
720#endif
721
722
723/*********************************************************************************************************************************
724* Internal Functions *
725*********************************************************************************************************************************/
726IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
727IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
728IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
729IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
730/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
733IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
734IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
735IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
736IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
737IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
738IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
740IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
742IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
743#ifdef IEM_WITH_SETJMP
744DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
745DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
746DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
748DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
749#endif
750
751IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
752IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
754IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
755IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
756IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
757IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
758IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
759IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
760IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
761IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
762IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
763IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
764IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
765IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
766IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
767
768#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
769IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
770#endif
771IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
772IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
773
774
775
776/**
777 * Sets the pass up status.
778 *
779 * @returns VINF_SUCCESS.
780 * @param pVCpu The cross context virtual CPU structure of the
781 * calling thread.
782 * @param rcPassUp The pass up status. Must be informational.
783 * VINF_SUCCESS is not allowed.
784 */
785IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
786{
787 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
788
789 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
790 if (rcOldPassUp == VINF_SUCCESS)
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 /* If both are EM scheduling codes, use EM priority rules. */
793 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
794 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
795 {
796 if (rcPassUp < rcOldPassUp)
797 {
798 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
799 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
800 }
801 else
802 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
803 }
804 /* Override EM scheduling with specific status code. */
805 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
806 {
807 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
808 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
809 }
810 /* Don't override specific status code, first come first served. */
811 else
812 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Calculates the CPU mode.
819 *
820 * This is mainly for updating IEMCPU::enmCpuMode.
821 *
822 * @returns CPU mode.
823 * @param pCtx The register context for the CPU.
824 */
825DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
826{
827 if (CPUMIsGuestIn64BitCodeEx(pCtx))
828 return IEMMODE_64BIT;
829 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
830 return IEMMODE_32BIT;
831 return IEMMODE_16BIT;
832}
833
834
835/**
836 * Initializes the execution state.
837 *
838 * @param pVCpu The cross context virtual CPU structure of the
839 * calling thread.
840 * @param fBypassHandlers Whether to bypass access handlers.
841 *
842 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
843 * side-effects in strict builds.
844 */
845DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
846{
847 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
848
849 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
850
851#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
854 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
855 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
856 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
858 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
859 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
860#endif
861
862#ifdef VBOX_WITH_RAW_MODE_NOT_R0
863 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
864#endif
865 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
866 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
867#ifdef VBOX_STRICT
868 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
869 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
870 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
871 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
872 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
873 pVCpu->iem.s.uRexReg = 127;
874 pVCpu->iem.s.uRexB = 127;
875 pVCpu->iem.s.uRexIndex = 127;
876 pVCpu->iem.s.iEffSeg = 127;
877 pVCpu->iem.s.idxPrefix = 127;
878 pVCpu->iem.s.uVex3rdReg = 127;
879 pVCpu->iem.s.uVexLength = 127;
880 pVCpu->iem.s.fEvexStuff = 127;
881 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
882# ifdef IEM_WITH_CODE_TLB
883 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
884 pVCpu->iem.s.pbInstrBuf = NULL;
885 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
886 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
887 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
888 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
889# else
890 pVCpu->iem.s.offOpcode = 127;
891 pVCpu->iem.s.cbOpcode = 127;
892# endif
893#endif
894
895 pVCpu->iem.s.cActiveMappings = 0;
896 pVCpu->iem.s.iNextMapping = 0;
897 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
898 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
899#ifdef VBOX_WITH_RAW_MODE_NOT_R0
900 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
901 && pCtx->cs.u64Base == 0
902 && pCtx->cs.u32Limit == UINT32_MAX
903 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
904 if (!pVCpu->iem.s.fInPatchCode)
905 CPUMRawLeave(pVCpu, VINF_SUCCESS);
906#endif
907
908#ifdef IEM_VERIFICATION_MODE_FULL
909 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
910 pVCpu->iem.s.fNoRem = true;
911#endif
912}
913
914
915/**
916 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
917 *
918 * @param pVCpu The cross context virtual CPU structure of the
919 * calling thread.
920 */
921DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
922{
923 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
924#ifdef IEM_VERIFICATION_MODE_FULL
925 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
926#endif
927#ifdef VBOX_STRICT
928# ifdef IEM_WITH_CODE_TLB
929 NOREF(pVCpu);
930# else
931 pVCpu->iem.s.cbOpcode = 0;
932# endif
933#else
934 NOREF(pVCpu);
935#endif
936}
937
938
939/**
940 * Initializes the decoder state.
941 *
942 * iemReInitDecoder is mostly a copy of this function.
943 *
944 * @param pVCpu The cross context virtual CPU structure of the
945 * calling thread.
946 * @param fBypassHandlers Whether to bypass access handlers.
947 */
948DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
949{
950 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
951
952 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
953
954#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
955 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
956 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
957 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
959 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
960 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
961 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
962 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
963#endif
964
965#ifdef VBOX_WITH_RAW_MODE_NOT_R0
966 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
967#endif
968 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
969#ifdef IEM_VERIFICATION_MODE_FULL
970 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
971 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
972#endif
973 IEMMODE enmMode = iemCalcCpuMode(pCtx);
974 pVCpu->iem.s.enmCpuMode = enmMode;
975 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
976 pVCpu->iem.s.enmEffAddrMode = enmMode;
977 if (enmMode != IEMMODE_64BIT)
978 {
979 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
980 pVCpu->iem.s.enmEffOpSize = enmMode;
981 }
982 else
983 {
984 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
985 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
986 }
987 pVCpu->iem.s.fPrefixes = 0;
988 pVCpu->iem.s.uRexReg = 0;
989 pVCpu->iem.s.uRexB = 0;
990 pVCpu->iem.s.uRexIndex = 0;
991 pVCpu->iem.s.idxPrefix = 0;
992 pVCpu->iem.s.uVex3rdReg = 0;
993 pVCpu->iem.s.uVexLength = 0;
994 pVCpu->iem.s.fEvexStuff = 0;
995 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
996#ifdef IEM_WITH_CODE_TLB
997 pVCpu->iem.s.pbInstrBuf = NULL;
998 pVCpu->iem.s.offInstrNextByte = 0;
999 pVCpu->iem.s.offCurInstrStart = 0;
1000# ifdef VBOX_STRICT
1001 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1002 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1003 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1004# endif
1005#else
1006 pVCpu->iem.s.offOpcode = 0;
1007 pVCpu->iem.s.cbOpcode = 0;
1008#endif
1009 pVCpu->iem.s.cActiveMappings = 0;
1010 pVCpu->iem.s.iNextMapping = 0;
1011 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1012 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1013#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1014 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1015 && pCtx->cs.u64Base == 0
1016 && pCtx->cs.u32Limit == UINT32_MAX
1017 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1018 if (!pVCpu->iem.s.fInPatchCode)
1019 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1020#endif
1021
1022#ifdef DBGFTRACE_ENABLED
1023 switch (enmMode)
1024 {
1025 case IEMMODE_64BIT:
1026 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1027 break;
1028 case IEMMODE_32BIT:
1029 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1030 break;
1031 case IEMMODE_16BIT:
1032 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1033 break;
1034 }
1035#endif
1036}
1037
1038
1039/**
1040 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1041 *
1042 * This is mostly a copy of iemInitDecoder.
1043 *
1044 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1045 */
1046DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1047{
1048 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1049
1050 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1051
1052#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1053 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1054 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1055 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1056 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1057 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1058 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1059 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1060 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1061#endif
1062
1063 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1064#ifdef IEM_VERIFICATION_MODE_FULL
1065 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1066 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1067#endif
1068 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1069 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1070 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1071 pVCpu->iem.s.enmEffAddrMode = enmMode;
1072 if (enmMode != IEMMODE_64BIT)
1073 {
1074 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1075 pVCpu->iem.s.enmEffOpSize = enmMode;
1076 }
1077 else
1078 {
1079 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1080 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1081 }
1082 pVCpu->iem.s.fPrefixes = 0;
1083 pVCpu->iem.s.uRexReg = 0;
1084 pVCpu->iem.s.uRexB = 0;
1085 pVCpu->iem.s.uRexIndex = 0;
1086 pVCpu->iem.s.idxPrefix = 0;
1087 pVCpu->iem.s.uVex3rdReg = 0;
1088 pVCpu->iem.s.uVexLength = 0;
1089 pVCpu->iem.s.fEvexStuff = 0;
1090 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1091#ifdef IEM_WITH_CODE_TLB
1092 if (pVCpu->iem.s.pbInstrBuf)
1093 {
1094 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1095 - pVCpu->iem.s.uInstrBufPc;
1096 if (off < pVCpu->iem.s.cbInstrBufTotal)
1097 {
1098 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1099 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1100 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1101 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1102 else
1103 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1104 }
1105 else
1106 {
1107 pVCpu->iem.s.pbInstrBuf = NULL;
1108 pVCpu->iem.s.offInstrNextByte = 0;
1109 pVCpu->iem.s.offCurInstrStart = 0;
1110 pVCpu->iem.s.cbInstrBuf = 0;
1111 pVCpu->iem.s.cbInstrBufTotal = 0;
1112 }
1113 }
1114 else
1115 {
1116 pVCpu->iem.s.offInstrNextByte = 0;
1117 pVCpu->iem.s.offCurInstrStart = 0;
1118 pVCpu->iem.s.cbInstrBuf = 0;
1119 pVCpu->iem.s.cbInstrBufTotal = 0;
1120 }
1121#else
1122 pVCpu->iem.s.cbOpcode = 0;
1123 pVCpu->iem.s.offOpcode = 0;
1124#endif
1125 Assert(pVCpu->iem.s.cActiveMappings == 0);
1126 pVCpu->iem.s.iNextMapping = 0;
1127 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1128 Assert(pVCpu->iem.s.fBypassHandlers == false);
1129#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1130 if (!pVCpu->iem.s.fInPatchCode)
1131 { /* likely */ }
1132 else
1133 {
1134 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1135 && pCtx->cs.u64Base == 0
1136 && pCtx->cs.u32Limit == UINT32_MAX
1137 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1138 if (!pVCpu->iem.s.fInPatchCode)
1139 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1140 }
1141#endif
1142
1143#ifdef DBGFTRACE_ENABLED
1144 switch (enmMode)
1145 {
1146 case IEMMODE_64BIT:
1147 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1148 break;
1149 case IEMMODE_32BIT:
1150 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1151 break;
1152 case IEMMODE_16BIT:
1153 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1154 break;
1155 }
1156#endif
1157}
1158
1159
1160
1161/**
1162 * Prefetches opcodes the first time when starting execution.
1163 *
1164 * @returns Strict VBox status code.
1165 * @param pVCpu The cross context virtual CPU structure of the
1166 * calling thread.
1167 * @param fBypassHandlers Whether to bypass access handlers.
1168 */
1169IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1170{
1171#ifdef IEM_VERIFICATION_MODE_FULL
1172 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1173#endif
1174 iemInitDecoder(pVCpu, fBypassHandlers);
1175
1176#ifdef IEM_WITH_CODE_TLB
1177 /** @todo Do ITLB lookup here. */
1178
1179#else /* !IEM_WITH_CODE_TLB */
1180
1181 /*
1182 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1183 *
1184 * First translate CS:rIP to a physical address.
1185 */
1186 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1187 uint32_t cbToTryRead;
1188 RTGCPTR GCPtrPC;
1189 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1190 {
1191 cbToTryRead = PAGE_SIZE;
1192 GCPtrPC = pCtx->rip;
1193 if (IEM_IS_CANONICAL(GCPtrPC))
1194 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1195 else
1196 return iemRaiseGeneralProtectionFault0(pVCpu);
1197 }
1198 else
1199 {
1200 uint32_t GCPtrPC32 = pCtx->eip;
1201 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1202 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1203 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1204 else
1205 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1206 if (cbToTryRead) { /* likely */ }
1207 else /* overflowed */
1208 {
1209 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1210 cbToTryRead = UINT32_MAX;
1211 }
1212 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1213 Assert(GCPtrPC <= UINT32_MAX);
1214 }
1215
1216# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1217 /* Allow interpretation of patch manager code blocks since they can for
1218 instance throw #PFs for perfectly good reasons. */
1219 if (pVCpu->iem.s.fInPatchCode)
1220 {
1221 size_t cbRead = 0;
1222 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1223 AssertRCReturn(rc, rc);
1224 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1225 return VINF_SUCCESS;
1226 }
1227# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1228
1229 RTGCPHYS GCPhys;
1230 uint64_t fFlags;
1231 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1232 if (RT_SUCCESS(rc)) { /* probable */ }
1233 else
1234 {
1235 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1236 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1237 }
1238 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1239 else
1240 {
1241 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1242 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1243 }
1244 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1245 else
1246 {
1247 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1248 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1249 }
1250 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1251 /** @todo Check reserved bits and such stuff. PGM is better at doing
1252 * that, so do it when implementing the guest virtual address
1253 * TLB... */
1254
1255# ifdef IEM_VERIFICATION_MODE_FULL
1256 /*
1257 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1258 * instruction.
1259 */
1260 /** @todo optimize this differently by not using PGMPhysRead. */
1261 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1262 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1263 if ( offPrevOpcodes < cbOldOpcodes
1264 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1265 {
1266 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1267 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1268 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1269 pVCpu->iem.s.cbOpcode = cbNew;
1270 return VINF_SUCCESS;
1271 }
1272# endif
1273
1274 /*
1275 * Read the bytes at this address.
1276 */
1277 PVM pVM = pVCpu->CTX_SUFF(pVM);
1278# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1279 size_t cbActual;
1280 if ( PATMIsEnabled(pVM)
1281 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1282 {
1283 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1284 Assert(cbActual > 0);
1285 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1286 }
1287 else
1288# endif
1289 {
1290 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1291 if (cbToTryRead > cbLeftOnPage)
1292 cbToTryRead = cbLeftOnPage;
1293 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1294 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1295
1296 if (!pVCpu->iem.s.fBypassHandlers)
1297 {
1298 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1299 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1300 { /* likely */ }
1301 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1302 {
1303 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1304 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1305 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1306 }
1307 else
1308 {
1309 Log((RT_SUCCESS(rcStrict)
1310 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1311 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1312 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1313 return rcStrict;
1314 }
1315 }
1316 else
1317 {
1318 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1319 if (RT_SUCCESS(rc))
1320 { /* likely */ }
1321 else
1322 {
1323 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1324 GCPtrPC, GCPhys, rc, cbToTryRead));
1325 return rc;
1326 }
1327 }
1328 pVCpu->iem.s.cbOpcode = cbToTryRead;
1329 }
1330#endif /* !IEM_WITH_CODE_TLB */
1331 return VINF_SUCCESS;
1332}
1333
1334
1335/**
1336 * Invalidates the IEM TLBs.
1337 *
1338 * This is called internally as well as by PGM when moving GC mappings.
1339 *
1340 *
1341 * @param pVCpu The cross context virtual CPU structure of the calling
1342 * thread.
1343 * @param fVmm Set when PGM calls us with a remapping.
1344 */
1345VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1346{
1347#ifdef IEM_WITH_CODE_TLB
1348 pVCpu->iem.s.cbInstrBufTotal = 0;
1349 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1350 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1351 { /* very likely */ }
1352 else
1353 {
1354 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1355 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1356 while (i-- > 0)
1357 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1358 }
1359#endif
1360
1361#ifdef IEM_WITH_DATA_TLB
1362 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1363 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1364 { /* very likely */ }
1365 else
1366 {
1367 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1368 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1369 while (i-- > 0)
1370 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1371 }
1372#endif
1373 NOREF(pVCpu); NOREF(fVmm);
1374}
1375
1376
1377/**
1378 * Invalidates a page in the TLBs.
1379 *
1380 * @param pVCpu The cross context virtual CPU structure of the calling
1381 * thread.
1382 * @param GCPtr The address of the page to invalidate
1383 */
1384VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1385{
1386#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1387 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1388 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1389 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1390 uintptr_t idx = (uint8_t)GCPtr;
1391
1392# ifdef IEM_WITH_CODE_TLB
1393 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1394 {
1395 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1396 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1397 pVCpu->iem.s.cbInstrBufTotal = 0;
1398 }
1399# endif
1400
1401# ifdef IEM_WITH_DATA_TLB
1402 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1403 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1404# endif
1405#else
1406 NOREF(pVCpu); NOREF(GCPtr);
1407#endif
1408}
1409
1410
1411/**
1412 * Invalidates the host physical aspects of the IEM TLBs.
1413 *
1414 * This is called internally as well as by PGM when moving GC mappings.
1415 *
1416 * @param pVCpu The cross context virtual CPU structure of the calling
1417 * thread.
1418 */
1419VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1420{
1421#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1422 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1423
1424# ifdef IEM_WITH_CODE_TLB
1425 pVCpu->iem.s.cbInstrBufTotal = 0;
1426# endif
1427 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1428 if (uTlbPhysRev != 0)
1429 {
1430 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1431 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1432 }
1433 else
1434 {
1435 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1436 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1437
1438 unsigned i;
1439# ifdef IEM_WITH_CODE_TLB
1440 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1441 while (i-- > 0)
1442 {
1443 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1444 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1445 }
1446# endif
1447# ifdef IEM_WITH_DATA_TLB
1448 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1449 while (i-- > 0)
1450 {
1451 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1452 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1453 }
1454# endif
1455 }
1456#else
1457 NOREF(pVCpu);
1458#endif
1459}
1460
1461
1462/**
1463 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1464 *
1465 * This is called internally as well as by PGM when moving GC mappings.
1466 *
1467 * @param pVM The cross context VM structure.
1468 *
1469 * @remarks Caller holds the PGM lock.
1470 */
1471VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1472{
1473 RT_NOREF_PV(pVM);
1474}
1475
1476#ifdef IEM_WITH_CODE_TLB
1477
1478/**
1479 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1480 * failure and jumps.
1481 *
1482 * We end up here for a number of reasons:
1483 * - pbInstrBuf isn't yet initialized.
1484 * - Advancing beyond the buffer boundary (e.g. cross page).
1485 * - Advancing beyond the CS segment limit.
1486 * - Fetching from non-mappable page (e.g. MMIO).
1487 *
1488 * @param pVCpu The cross context virtual CPU structure of the
1489 * calling thread.
1490 * @param pvDst Where to return the bytes.
1491 * @param cbDst Number of bytes to read.
1492 *
1493 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1494 */
1495IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1496{
1497#ifdef IN_RING3
1498//__debugbreak();
1499 for (;;)
1500 {
1501 Assert(cbDst <= 8);
1502 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1503
1504 /*
1505 * We might have a partial buffer match; deal with that first to make the
1506 * rest simpler. This is the first part of the cross page/buffer case.
1507 */
1508 if (pVCpu->iem.s.pbInstrBuf != NULL)
1509 {
1510 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1511 {
1512 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1513 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1514 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1515
1516 cbDst -= cbCopy;
1517 pvDst = (uint8_t *)pvDst + cbCopy;
1518 offBuf += cbCopy;
1519 pVCpu->iem.s.offInstrNextByte += offBuf;
1520 }
1521 }
1522
1523 /*
1524 * Check segment limit, figuring how much we're allowed to access at this point.
1525 *
1526 * We will fault immediately if RIP is past the segment limit / in non-canonical
1527 * territory. If we do continue, there are one or more bytes to read before we
1528 * end up in trouble and we need to do that first before faulting.
1529 */
1530 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1531 RTGCPTR GCPtrFirst;
1532 uint32_t cbMaxRead;
1533 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1534 {
1535 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1536 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1537 { /* likely */ }
1538 else
1539 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1540 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1541 }
1542 else
1543 {
1544 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1545 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1546 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1547 { /* likely */ }
1548 else
1549 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1550 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1551 if (cbMaxRead != 0)
1552 { /* likely */ }
1553 else
1554 {
1555 /* Overflowed because address is 0 and limit is max. */
1556 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1557 cbMaxRead = X86_PAGE_SIZE;
1558 }
1559 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1560 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1561 if (cbMaxRead2 < cbMaxRead)
1562 cbMaxRead = cbMaxRead2;
1563 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1564 }
1565
1566 /*
1567 * Get the TLB entry for this piece of code.
1568 */
1569 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1570 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1571 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1572 if (pTlbe->uTag == uTag)
1573 {
1574 /* likely when executing lots of code, otherwise unlikely */
1575# ifdef VBOX_WITH_STATISTICS
1576 pVCpu->iem.s.CodeTlb.cTlbHits++;
1577# endif
1578 }
1579 else
1580 {
1581 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1582# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1583 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1584 {
1585 pTlbe->uTag = uTag;
1586            pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1587                                    | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1588 pTlbe->GCPhys = NIL_RTGCPHYS;
1589 pTlbe->pbMappingR3 = NULL;
1590 }
1591 else
1592# endif
1593 {
1594 RTGCPHYS GCPhys;
1595 uint64_t fFlags;
1596 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1597 if (RT_FAILURE(rc))
1598 {
1599 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1600 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1601 }
1602
1603 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1604 pTlbe->uTag = uTag;
1605 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1606 pTlbe->GCPhys = GCPhys;
1607 pTlbe->pbMappingR3 = NULL;
1608 }
1609 }
1610
1611 /*
1612 * Check TLB page table level access flags.
1613 */
1614 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1615 {
1616 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1617 {
1618 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1619 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1620 }
1621 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1622 {
1623 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1624 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1625 }
1626 }
1627
1628# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1629 /*
1630 * Allow interpretation of patch manager code blocks since they can for
1631 * instance throw #PFs for perfectly good reasons.
1632 */
1633 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1634 { /* no unlikely */ }
1635 else
1636 {
1637            /** @todo Could optimize this a little in ring-3 if we liked. */
1638 size_t cbRead = 0;
1639 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1640 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1641 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1642 return;
1643 }
1644# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1645
1646 /*
1647 * Look up the physical page info if necessary.
1648 */
1649 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1650 { /* not necessary */ }
1651 else
1652 {
1653 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1654 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1655 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1656 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1657 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1658 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1659 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1660 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1661 }
1662
1663# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1664 /*
1665     * Try to do a direct read using the pbMappingR3 pointer.
1666 */
1667 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1668 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1669 {
1670 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1671 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1672 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1673 {
1674 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1675 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1676 }
1677 else
1678 {
1679 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1680 Assert(cbInstr < cbMaxRead);
1681 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1682 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1683 }
1684 if (cbDst <= cbMaxRead)
1685 {
1686 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1687 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1688 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1689 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1690 return;
1691 }
1692 pVCpu->iem.s.pbInstrBuf = NULL;
1693
1694 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1695 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1696 }
1697 else
1698# endif
1699#if 0
1700 /*
1701     * If there is no special read handling, we can read a bit more and
1702     * put it in the prefetch buffer.
1703 */
1704 if ( cbDst < cbMaxRead
1705 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1706 {
1707 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1708 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1709 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1710 { /* likely */ }
1711 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1712 {
1713 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1714 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1715 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1716            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1717 }
1718 else
1719 {
1720 Log((RT_SUCCESS(rcStrict)
1721 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1722 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1723 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1724 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1725 }
1726 }
1727 /*
1728 * Special read handling, so only read exactly what's needed.
1729 * This is a highly unlikely scenario.
1730 */
1731 else
1732#endif
1733 {
1734 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1735 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1736 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1737 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1738 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1739 { /* likely */ }
1740 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1741 {
1742 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1743                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1744 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1745 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1746 }
1747 else
1748 {
1749 Log((RT_SUCCESS(rcStrict)
1750 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1751 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1752                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1753 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1754 }
1755 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1756 if (cbToRead == cbDst)
1757 return;
1758 }
1759
1760 /*
1761 * More to read, loop.
1762 */
1763 cbDst -= cbMaxRead;
1764 pvDst = (uint8_t *)pvDst + cbMaxRead;
1765 }
1766#else
1767 RT_NOREF(pvDst, cbDst);
1768 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1769#endif
1770}
1771
1772#else
1773
1774/**
1775 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1776 * exception if it fails.
1777 *
1778 * @returns Strict VBox status code.
1779 * @param pVCpu The cross context virtual CPU structure of the
1780 * calling thread.
1781 * @param   cbMin               The minimum number of bytes relative to offOpcode
1782 * that must be read.
1783 */
1784IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1785{
1786 /*
1787 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1788 *
1789 * First translate CS:rIP to a physical address.
1790 */
1791 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1792 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1793 uint32_t cbToTryRead;
1794 RTGCPTR GCPtrNext;
1795 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1796 {
1797 cbToTryRead = PAGE_SIZE;
1798 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1799 if (!IEM_IS_CANONICAL(GCPtrNext))
1800 return iemRaiseGeneralProtectionFault0(pVCpu);
1801 }
1802 else
1803 {
1804 uint32_t GCPtrNext32 = pCtx->eip;
1805 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1806 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1807 if (GCPtrNext32 > pCtx->cs.u32Limit)
1808 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1809 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1810 if (!cbToTryRead) /* overflowed */
1811 {
1812 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1813 cbToTryRead = UINT32_MAX;
1814 /** @todo check out wrapping around the code segment. */
1815 }
1816 if (cbToTryRead < cbMin - cbLeft)
1817 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1818 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1819 }
1820
1821 /* Only read up to the end of the page, and make sure we don't read more
1822 than the opcode buffer can hold. */
1823 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1824 if (cbToTryRead > cbLeftOnPage)
1825 cbToTryRead = cbLeftOnPage;
1826 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1827 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1828/** @todo r=bird: Convert assertion into undefined opcode exception? */
1829 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1830
1831# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1832 /* Allow interpretation of patch manager code blocks since they can for
1833 instance throw #PFs for perfectly good reasons. */
1834 if (pVCpu->iem.s.fInPatchCode)
1835 {
1836 size_t cbRead = 0;
1837 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1838 AssertRCReturn(rc, rc);
1839 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1840 return VINF_SUCCESS;
1841 }
1842# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1843
1844 RTGCPHYS GCPhys;
1845 uint64_t fFlags;
1846 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1847 if (RT_FAILURE(rc))
1848 {
1849 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1850 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1851 }
1852 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1853 {
1854 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1855 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1856 }
1857 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1858 {
1859 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1860 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1861 }
1862 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1863 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1864 /** @todo Check reserved bits and such stuff. PGM is better at doing
1865 * that, so do it when implementing the guest virtual address
1866 * TLB... */
1867
1868 /*
1869 * Read the bytes at this address.
1870 *
1871 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1872 * and since PATM should only patch the start of an instruction there
1873 * should be no need to check again here.
1874 */
1875 if (!pVCpu->iem.s.fBypassHandlers)
1876 {
1877 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1878 cbToTryRead, PGMACCESSORIGIN_IEM);
1879 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1880 { /* likely */ }
1881 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1882 {
1883 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1884                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1885 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1886 }
1887 else
1888 {
1889 Log((RT_SUCCESS(rcStrict)
1890 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1891 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1892                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1893 return rcStrict;
1894 }
1895 }
1896 else
1897 {
1898 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1899 if (RT_SUCCESS(rc))
1900 { /* likely */ }
1901 else
1902 {
1903 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1904 return rc;
1905 }
1906 }
1907 pVCpu->iem.s.cbOpcode += cbToTryRead;
1908 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1909
1910 return VINF_SUCCESS;
1911}
1912
1913#endif /* !IEM_WITH_CODE_TLB */
1914#ifndef IEM_WITH_SETJMP
1915
1916/**
1917 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1918 *
1919 * @returns Strict VBox status code.
1920 * @param pVCpu The cross context virtual CPU structure of the
1921 * calling thread.
1922 * @param pb Where to return the opcode byte.
1923 */
1924DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1925{
1926 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1927 if (rcStrict == VINF_SUCCESS)
1928 {
1929 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1930 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1931 pVCpu->iem.s.offOpcode = offOpcode + 1;
1932 }
1933 else
1934 *pb = 0;
1935 return rcStrict;
1936}
1937
1938
1939/**
1940 * Fetches the next opcode byte.
1941 *
1942 * @returns Strict VBox status code.
1943 * @param pVCpu The cross context virtual CPU structure of the
1944 * calling thread.
1945 * @param pu8 Where to return the opcode byte.
1946 */
1947DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1948{
1949 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1950 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1951 {
1952 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1953 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1954 return VINF_SUCCESS;
1955 }
1956 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1957}
1958
1959#else /* IEM_WITH_SETJMP */
1960
1961/**
1962 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1963 *
1964 * @returns The opcode byte.
1965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1966 */
1967DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1968{
1969# ifdef IEM_WITH_CODE_TLB
1970 uint8_t u8;
1971 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1972 return u8;
1973# else
1974 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1975 if (rcStrict == VINF_SUCCESS)
1976 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1977 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1978# endif
1979}
1980
1981
1982/**
1983 * Fetches the next opcode byte, longjmp on error.
1984 *
1985 * @returns The opcode byte.
1986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1987 */
1988DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1989{
1990# ifdef IEM_WITH_CODE_TLB
1991 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1992 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1993 if (RT_LIKELY( pbBuf != NULL
1994 && offBuf < pVCpu->iem.s.cbInstrBuf))
1995 {
1996 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1997 return pbBuf[offBuf];
1998 }
1999# else
2000 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2001 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2002 {
2003 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2004 return pVCpu->iem.s.abOpcode[offOpcode];
2005 }
2006# endif
2007 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2008}
2009
2010#endif /* IEM_WITH_SETJMP */
2011
2012/**
2013 * Fetches the next opcode byte, returns automatically on failure.
2014 *
2015 * @param a_pu8 Where to return the opcode byte.
2016 * @remark Implicitly references pVCpu.
2017 */
2018#ifndef IEM_WITH_SETJMP
2019# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2020 do \
2021 { \
2022 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2023 if (rcStrict2 == VINF_SUCCESS) \
2024 { /* likely */ } \
2025 else \
2026 return rcStrict2; \
2027 } while (0)
2028#else
2029# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2030#endif /* IEM_WITH_SETJMP */
2031
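/*
 * Illustrative sketch only, not part of the build: how a decoder-style function
 * typically consumes IEM_OPCODE_GET_NEXT_U8.  In the non-setjmp build the macro
 * makes the caller return the strict status code on failure, so the caller must
 * itself return VBOXSTRICTRC; in the setjmp build the fetch longjmps instead and
 * the macro is a plain assignment.  The function name is hypothetical.
 *
 *     IEM_STATIC VBOXSTRICTRC iemExampleDecodeTwoBytes(PVMCPU pVCpu)
 *     {
 *         uint8_t bOpcode;
 *         IEM_OPCODE_GET_NEXT_U8(&bOpcode);   // may return / longjmp on fetch failure
 *         uint8_t bRm;
 *         IEM_OPCODE_GET_NEXT_U8(&bRm);       // ditto
 *         Log4(("example: opcode=%#x modrm=%#x\n", bOpcode, bRm));
 *         return VINF_SUCCESS;
 *     }
 */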
2032
2033#ifndef IEM_WITH_SETJMP
2034/**
2035 * Fetches the next signed byte from the opcode stream.
2036 *
2037 * @returns Strict VBox status code.
2038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2039 * @param pi8 Where to return the signed byte.
2040 */
2041DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2042{
2043 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2044}
2045#endif /* !IEM_WITH_SETJMP */
2046
2047
2048/**
2049 * Fetches the next signed byte from the opcode stream, returning automatically
2050 * on failure.
2051 *
2052 * @param a_pi8 Where to return the signed byte.
2053 * @remark Implicitly references pVCpu.
2054 */
2055#ifndef IEM_WITH_SETJMP
2056# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2057 do \
2058 { \
2059 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2060 if (rcStrict2 != VINF_SUCCESS) \
2061 return rcStrict2; \
2062 } while (0)
2063#else /* IEM_WITH_SETJMP */
2064# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2065
2066#endif /* IEM_WITH_SETJMP */
2067
2068#ifndef IEM_WITH_SETJMP
2069
2070/**
2071 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2072 *
2073 * @returns Strict VBox status code.
2074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2075 * @param   pu16                Where to return the opcode word.
2076 */
2077DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2078{
2079 uint8_t u8;
2080 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2081 if (rcStrict == VINF_SUCCESS)
2082 *pu16 = (int8_t)u8;
2083 return rcStrict;
2084}
2085
2086
2087/**
2088 * Fetches the next signed byte from the opcode stream, extending it to
2089 * unsigned 16-bit.
2090 *
2091 * @returns Strict VBox status code.
2092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2093 * @param pu16 Where to return the unsigned word.
2094 */
2095DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2096{
2097 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2098 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2099 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2100
2101 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2102 pVCpu->iem.s.offOpcode = offOpcode + 1;
2103 return VINF_SUCCESS;
2104}
2105
2106#endif /* !IEM_WITH_SETJMP */
2107
2108/**
2109 * Fetches the next signed byte from the opcode stream, sign-extending it to
2110 * a word and returning automatically on failure.
2111 *
2112 * @param a_pu16 Where to return the word.
2113 * @remark Implicitly references pVCpu.
2114 */
2115#ifndef IEM_WITH_SETJMP
2116# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2117 do \
2118 { \
2119 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2120 if (rcStrict2 != VINF_SUCCESS) \
2121 return rcStrict2; \
2122 } while (0)
2123#else
2124# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2125#endif
2126
2127#ifndef IEM_WITH_SETJMP
2128
2129/**
2130 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2131 *
2132 * @returns Strict VBox status code.
2133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2134 * @param pu32 Where to return the opcode dword.
2135 */
2136DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2137{
2138 uint8_t u8;
2139 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2140 if (rcStrict == VINF_SUCCESS)
2141 *pu32 = (int8_t)u8;
2142 return rcStrict;
2143}
2144
2145
2146/**
2147 * Fetches the next signed byte from the opcode stream, extending it to
2148 * unsigned 32-bit.
2149 *
2150 * @returns Strict VBox status code.
2151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2152 * @param pu32 Where to return the unsigned dword.
2153 */
2154DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2155{
2156 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2157 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2158 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2159
2160 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2161 pVCpu->iem.s.offOpcode = offOpcode + 1;
2162 return VINF_SUCCESS;
2163}
2164
2165#endif /* !IEM_WITH_SETJMP */
2166
2167/**
2168 * Fetches the next signed byte from the opcode stream, sign-extending it to
2169 * a double word and returning automatically on failure.
2170 *
2171 * @param   a_pu32              Where to return the double word.
2172 * @remark Implicitly references pVCpu.
2173 */
2174#ifndef IEM_WITH_SETJMP
2175# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2176 do \
2177 { \
2178 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2179 if (rcStrict2 != VINF_SUCCESS) \
2180 return rcStrict2; \
2181 } while (0)
2182#else
2183# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2184#endif
2185
2186#ifndef IEM_WITH_SETJMP
2187
2188/**
2189 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2190 *
2191 * @returns Strict VBox status code.
2192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2193 * @param pu64 Where to return the opcode qword.
2194 */
2195DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2196{
2197 uint8_t u8;
2198 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2199 if (rcStrict == VINF_SUCCESS)
2200 *pu64 = (int8_t)u8;
2201 return rcStrict;
2202}
2203
2204
2205/**
2206 * Fetches the next signed byte from the opcode stream, extending it to
2207 * unsigned 64-bit.
2208 *
2209 * @returns Strict VBox status code.
2210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2211 * @param pu64 Where to return the unsigned qword.
2212 */
2213DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2214{
2215 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2216 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2217 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2218
2219 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2220 pVCpu->iem.s.offOpcode = offOpcode + 1;
2221 return VINF_SUCCESS;
2222}
2223
2224#endif /* !IEM_WITH_SETJMP */
2225
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream, sign-extending it to
2229 * a quad word and returning automatically on failure.
2230 *
2231 * @param   a_pu64              Where to return the quad word.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else
2243# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244#endif
2245
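/*
 * Illustrative sketch only, not part of the build: the three S8_SX macros above
 * cover the common "imm8 sign-extended to the effective operand size" pattern.
 * A decoder picks the variant matching the destination width it works with.
 * The helper name is hypothetical and the IEMMODE_* operand size values are
 * assumed from the IEM headers.
 *
 *     IEM_STATIC VBOXSTRICTRC iemExampleFetchImm8SignExtended(PVMCPU pVCpu, IEMMODE enmEffOpSize, uint64_t *puImm)
 *     {
 *         switch (enmEffOpSize)
 *         {
 *             case IEMMODE_16BIT:
 *             {
 *                 uint16_t u16Imm;
 *                 IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Imm);  // one opcode byte, sign-extended to 16 bits
 *                 *puImm = u16Imm;
 *                 break;
 *             }
 *             case IEMMODE_32BIT:
 *             {
 *                 uint32_t u32Imm;
 *                 IEM_OPCODE_GET_NEXT_S8_SX_U32(&u32Imm);  // sign-extended to 32 bits
 *                 *puImm = u32Imm;
 *                 break;
 *             }
 *             default:
 *                 IEM_OPCODE_GET_NEXT_S8_SX_U64(puImm);    // sign-extended to 64 bits
 *                 break;
 *         }
 *         return VINF_SUCCESS;
 *     }
 */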
2246
2247#ifndef IEM_WITH_SETJMP
2248
2249/**
2250 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2251 *
2252 * @returns Strict VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param pu16 Where to return the opcode word.
2255 */
2256DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2257{
2258 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2259 if (rcStrict == VINF_SUCCESS)
2260 {
2261 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2262# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2263 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2264# else
2265 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2266# endif
2267 pVCpu->iem.s.offOpcode = offOpcode + 2;
2268 }
2269 else
2270 *pu16 = 0;
2271 return rcStrict;
2272}
2273
2274
2275/**
2276 * Fetches the next opcode word.
2277 *
2278 * @returns Strict VBox status code.
2279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2280 * @param pu16 Where to return the opcode word.
2281 */
2282DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2283{
2284 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2285 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2286 {
2287 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2288# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2289 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2290# else
2291 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2292# endif
2293 return VINF_SUCCESS;
2294 }
2295 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2296}
2297
2298#else /* IEM_WITH_SETJMP */
2299
2300/**
2301 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2302 *
2303 * @returns The opcode word.
2304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2305 */
2306DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2307{
2308# ifdef IEM_WITH_CODE_TLB
2309 uint16_t u16;
2310 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2311 return u16;
2312# else
2313 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2314 if (rcStrict == VINF_SUCCESS)
2315 {
2316 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2317 pVCpu->iem.s.offOpcode += 2;
2318# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2319 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2320# else
2321 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2322# endif
2323 }
2324 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2325# endif
2326}
2327
2328
2329/**
2330 * Fetches the next opcode word, longjmp on error.
2331 *
2332 * @returns The opcode word.
2333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2334 */
2335DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2336{
2337# ifdef IEM_WITH_CODE_TLB
2338 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2339 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2340 if (RT_LIKELY( pbBuf != NULL
2341 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2342 {
2343 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2344# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2345 return *(uint16_t const *)&pbBuf[offBuf];
2346# else
2347 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2348# endif
2349 }
2350# else
2351 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2352 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2353 {
2354 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2355# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2356 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2357# else
2358 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2359# endif
2360 }
2361# endif
2362 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2363}
2364
2365#endif /* IEM_WITH_SETJMP */
2366
2367
2368/**
2369 * Fetches the next opcode word, returns automatically on failure.
2370 *
2371 * @param a_pu16 Where to return the opcode word.
2372 * @remark Implicitly references pVCpu.
2373 */
2374#ifndef IEM_WITH_SETJMP
2375# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2376 do \
2377 { \
2378 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2379 if (rcStrict2 != VINF_SUCCESS) \
2380 return rcStrict2; \
2381 } while (0)
2382#else
2383# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2384#endif
2385
2386#ifndef IEM_WITH_SETJMP
2387
2388/**
2389 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2390 *
2391 * @returns Strict VBox status code.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 * @param pu32 Where to return the opcode double word.
2394 */
2395DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2396{
2397 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2398 if (rcStrict == VINF_SUCCESS)
2399 {
2400 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2401 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2402 pVCpu->iem.s.offOpcode = offOpcode + 2;
2403 }
2404 else
2405 *pu32 = 0;
2406 return rcStrict;
2407}
2408
2409
2410/**
2411 * Fetches the next opcode word, zero extending it to a double word.
2412 *
2413 * @returns Strict VBox status code.
2414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2415 * @param pu32 Where to return the opcode double word.
2416 */
2417DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2418{
2419 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2420 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2421 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2422
2423 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2424 pVCpu->iem.s.offOpcode = offOpcode + 2;
2425 return VINF_SUCCESS;
2426}
2427
2428#endif /* !IEM_WITH_SETJMP */
2429
2430
2431/**
2432 * Fetches the next opcode word and zero extends it to a double word, returns
2433 * automatically on failure.
2434 *
2435 * @param a_pu32 Where to return the opcode double word.
2436 * @remark Implicitly references pVCpu.
2437 */
2438#ifndef IEM_WITH_SETJMP
2439# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2440 do \
2441 { \
2442 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2443 if (rcStrict2 != VINF_SUCCESS) \
2444 return rcStrict2; \
2445 } while (0)
2446#else
2447# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2448#endif
2449
2450#ifndef IEM_WITH_SETJMP
2451
2452/**
2453 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2454 *
2455 * @returns Strict VBox status code.
2456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2457 * @param pu64 Where to return the opcode quad word.
2458 */
2459DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2460{
2461 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2462 if (rcStrict == VINF_SUCCESS)
2463 {
2464 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2465 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2466 pVCpu->iem.s.offOpcode = offOpcode + 2;
2467 }
2468 else
2469 *pu64 = 0;
2470 return rcStrict;
2471}
2472
2473
2474/**
2475 * Fetches the next opcode word, zero extending it to a quad word.
2476 *
2477 * @returns Strict VBox status code.
2478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2479 * @param pu64 Where to return the opcode quad word.
2480 */
2481DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2482{
2483 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2484 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2485 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2486
2487 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488 pVCpu->iem.s.offOpcode = offOpcode + 2;
2489 return VINF_SUCCESS;
2490}
2491
2492#endif /* !IEM_WITH_SETJMP */
2493
2494/**
2495 * Fetches the next opcode word and zero extends it to a quad word, returns
2496 * automatically on failure.
2497 *
2498 * @param a_pu64 Where to return the opcode quad word.
2499 * @remark Implicitly references pVCpu.
2500 */
2501#ifndef IEM_WITH_SETJMP
2502# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2503 do \
2504 { \
2505 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2506 if (rcStrict2 != VINF_SUCCESS) \
2507 return rcStrict2; \
2508 } while (0)
2509#else
2510# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2511#endif
2512
2513
2514#ifndef IEM_WITH_SETJMP
2515/**
2516 * Fetches the next signed word from the opcode stream.
2517 *
2518 * @returns Strict VBox status code.
2519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2520 * @param pi16 Where to return the signed word.
2521 */
2522DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2523{
2524 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2525}
2526#endif /* !IEM_WITH_SETJMP */
2527
2528
2529/**
2530 * Fetches the next signed word from the opcode stream, returning automatically
2531 * on failure.
2532 *
2533 * @param a_pi16 Where to return the signed word.
2534 * @remark Implicitly references pVCpu.
2535 */
2536#ifndef IEM_WITH_SETJMP
2537# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2538 do \
2539 { \
2540 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2541 if (rcStrict2 != VINF_SUCCESS) \
2542 return rcStrict2; \
2543 } while (0)
2544#else
2545# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2546#endif
2547
2548#ifndef IEM_WITH_SETJMP
2549
2550/**
2551 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2552 *
2553 * @returns Strict VBox status code.
2554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2555 * @param pu32 Where to return the opcode dword.
2556 */
2557DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2558{
2559 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2560 if (rcStrict == VINF_SUCCESS)
2561 {
2562 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2563# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2564 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2565# else
2566 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2567 pVCpu->iem.s.abOpcode[offOpcode + 1],
2568 pVCpu->iem.s.abOpcode[offOpcode + 2],
2569 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2570# endif
2571 pVCpu->iem.s.offOpcode = offOpcode + 4;
2572 }
2573 else
2574 *pu32 = 0;
2575 return rcStrict;
2576}
2577
2578
2579/**
2580 * Fetches the next opcode dword.
2581 *
2582 * @returns Strict VBox status code.
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param pu32 Where to return the opcode double word.
2585 */
2586DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2587{
2588 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2589 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2590 {
2591 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2592# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2593 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2594# else
2595 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2596 pVCpu->iem.s.abOpcode[offOpcode + 1],
2597 pVCpu->iem.s.abOpcode[offOpcode + 2],
2598 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2599# endif
2600 return VINF_SUCCESS;
2601 }
2602 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2603}
2604
2605#else  /* IEM_WITH_SETJMP */
2606
2607/**
2608 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2609 *
2610 * @returns The opcode dword.
2611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2612 */
2613DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2614{
2615# ifdef IEM_WITH_CODE_TLB
2616 uint32_t u32;
2617 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2618 return u32;
2619# else
2620 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2621 if (rcStrict == VINF_SUCCESS)
2622 {
2623 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2624 pVCpu->iem.s.offOpcode = offOpcode + 4;
2625# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2626 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2627# else
2628 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2629 pVCpu->iem.s.abOpcode[offOpcode + 1],
2630 pVCpu->iem.s.abOpcode[offOpcode + 2],
2631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2632# endif
2633 }
2634 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2635# endif
2636}
2637
2638
2639/**
2640 * Fetches the next opcode dword, longjmp on error.
2641 *
2642 * @returns The opcode dword.
2643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2644 */
2645DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2646{
2647# ifdef IEM_WITH_CODE_TLB
2648 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2649 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2650 if (RT_LIKELY( pbBuf != NULL
2651 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2652 {
2653 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2654# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2655 return *(uint32_t const *)&pbBuf[offBuf];
2656# else
2657 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2658 pbBuf[offBuf + 1],
2659 pbBuf[offBuf + 2],
2660 pbBuf[offBuf + 3]);
2661# endif
2662 }
2663# else
2664 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2665 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2666 {
2667 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2668# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2669 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2670# else
2671 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2672 pVCpu->iem.s.abOpcode[offOpcode + 1],
2673 pVCpu->iem.s.abOpcode[offOpcode + 2],
2674 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2675# endif
2676 }
2677# endif
2678 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2679}
2680
2681#endif /* IEM_WITH_SETJMP */
2682
2683
2684/**
2685 * Fetches the next opcode dword, returns automatically on failure.
2686 *
2687 * @param a_pu32 Where to return the opcode dword.
2688 * @remark Implicitly references pVCpu.
2689 */
2690#ifndef IEM_WITH_SETJMP
2691# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2692 do \
2693 { \
2694 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2695 if (rcStrict2 != VINF_SUCCESS) \
2696 return rcStrict2; \
2697 } while (0)
2698#else
2699# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2700#endif
2701
2702#ifndef IEM_WITH_SETJMP
2703
2704/**
2705 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2706 *
2707 * @returns Strict VBox status code.
2708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2709 * @param   pu64                Where to return the opcode qword.
2710 */
2711DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2712{
2713 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2714 if (rcStrict == VINF_SUCCESS)
2715 {
2716 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2717 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2718 pVCpu->iem.s.abOpcode[offOpcode + 1],
2719 pVCpu->iem.s.abOpcode[offOpcode + 2],
2720 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2721 pVCpu->iem.s.offOpcode = offOpcode + 4;
2722 }
2723 else
2724 *pu64 = 0;
2725 return rcStrict;
2726}
2727
2728
2729/**
2730 * Fetches the next opcode dword, zero extending it to a quad word.
2731 *
2732 * @returns Strict VBox status code.
2733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2734 * @param pu64 Where to return the opcode quad word.
2735 */
2736DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2737{
2738 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2739 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2740 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2741
2742 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2743 pVCpu->iem.s.abOpcode[offOpcode + 1],
2744 pVCpu->iem.s.abOpcode[offOpcode + 2],
2745 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2746 pVCpu->iem.s.offOpcode = offOpcode + 4;
2747 return VINF_SUCCESS;
2748}
2749
2750#endif /* !IEM_WITH_SETJMP */
2751
2752
2753/**
2754 * Fetches the next opcode dword and zero extends it to a quad word, returns
2755 * automatically on failure.
2756 *
2757 * @param a_pu64 Where to return the opcode quad word.
2758 * @remark Implicitly references pVCpu.
2759 */
2760#ifndef IEM_WITH_SETJMP
2761# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2762 do \
2763 { \
2764 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2765 if (rcStrict2 != VINF_SUCCESS) \
2766 return rcStrict2; \
2767 } while (0)
2768#else
2769# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2770#endif
2771
2772
2773#ifndef IEM_WITH_SETJMP
2774/**
2775 * Fetches the next signed double word from the opcode stream.
2776 *
2777 * @returns Strict VBox status code.
2778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2779 * @param pi32 Where to return the signed double word.
2780 */
2781DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2782{
2783 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2784}
2785#endif
2786
2787/**
2788 * Fetches the next signed double word from the opcode stream, returning
2789 * automatically on failure.
2790 *
2791 * @param a_pi32 Where to return the signed double word.
2792 * @remark Implicitly references pVCpu.
2793 */
2794#ifndef IEM_WITH_SETJMP
2795# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2796 do \
2797 { \
2798 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2799 if (rcStrict2 != VINF_SUCCESS) \
2800 return rcStrict2; \
2801 } while (0)
2802#else
2803# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2804#endif
2805
2806#ifndef IEM_WITH_SETJMP
2807
2808/**
2809 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2810 *
2811 * @returns Strict VBox status code.
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 * @param pu64 Where to return the opcode qword.
2814 */
2815DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2816{
2817 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2818 if (rcStrict == VINF_SUCCESS)
2819 {
2820 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2821 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2822 pVCpu->iem.s.abOpcode[offOpcode + 1],
2823 pVCpu->iem.s.abOpcode[offOpcode + 2],
2824 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2825 pVCpu->iem.s.offOpcode = offOpcode + 4;
2826 }
2827 else
2828 *pu64 = 0;
2829 return rcStrict;
2830}
2831
2832
2833/**
2834 * Fetches the next opcode dword, sign extending it into a quad word.
2835 *
2836 * @returns Strict VBox status code.
2837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2838 * @param pu64 Where to return the opcode quad word.
2839 */
2840DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2841{
2842 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2843 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2844 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2845
2846 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2847 pVCpu->iem.s.abOpcode[offOpcode + 1],
2848 pVCpu->iem.s.abOpcode[offOpcode + 2],
2849 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2850 *pu64 = i32;
2851 pVCpu->iem.s.offOpcode = offOpcode + 4;
2852 return VINF_SUCCESS;
2853}
2854
2855#endif /* !IEM_WITH_SETJMP */
2856
2857
2858/**
2859 * Fetches the next opcode double word and sign extends it to a quad word,
2860 * returns automatically on failure.
2861 *
2862 * @param a_pu64 Where to return the opcode quad word.
2863 * @remark Implicitly references pVCpu.
2864 */
2865#ifndef IEM_WITH_SETJMP
2866# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2867 do \
2868 { \
2869 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2870 if (rcStrict2 != VINF_SUCCESS) \
2871 return rcStrict2; \
2872 } while (0)
2873#else
2874# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2875#endif
2876
2877#ifndef IEM_WITH_SETJMP
2878
2879/**
2880 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2881 *
2882 * @returns Strict VBox status code.
2883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2884 * @param pu64 Where to return the opcode qword.
2885 */
2886DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2887{
2888 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2889 if (rcStrict == VINF_SUCCESS)
2890 {
2891 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2892# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2893 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2894# else
2895 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2896 pVCpu->iem.s.abOpcode[offOpcode + 1],
2897 pVCpu->iem.s.abOpcode[offOpcode + 2],
2898 pVCpu->iem.s.abOpcode[offOpcode + 3],
2899 pVCpu->iem.s.abOpcode[offOpcode + 4],
2900 pVCpu->iem.s.abOpcode[offOpcode + 5],
2901 pVCpu->iem.s.abOpcode[offOpcode + 6],
2902 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2903# endif
2904 pVCpu->iem.s.offOpcode = offOpcode + 8;
2905 }
2906 else
2907 *pu64 = 0;
2908 return rcStrict;
2909}
2910
2911
2912/**
2913 * Fetches the next opcode qword.
2914 *
2915 * @returns Strict VBox status code.
2916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2917 * @param pu64 Where to return the opcode qword.
2918 */
2919DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2920{
2921 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2922 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2923 {
2924# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2925 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2926# else
2927 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2928 pVCpu->iem.s.abOpcode[offOpcode + 1],
2929 pVCpu->iem.s.abOpcode[offOpcode + 2],
2930 pVCpu->iem.s.abOpcode[offOpcode + 3],
2931 pVCpu->iem.s.abOpcode[offOpcode + 4],
2932 pVCpu->iem.s.abOpcode[offOpcode + 5],
2933 pVCpu->iem.s.abOpcode[offOpcode + 6],
2934 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2935# endif
2936 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2937 return VINF_SUCCESS;
2938 }
2939 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2940}
2941
2942#else /* IEM_WITH_SETJMP */
2943
2944/**
2945 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2946 *
2947 * @returns The opcode qword.
2948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2949 */
2950DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2951{
2952# ifdef IEM_WITH_CODE_TLB
2953 uint64_t u64;
2954 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2955 return u64;
2956# else
2957 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2958 if (rcStrict == VINF_SUCCESS)
2959 {
2960 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2961 pVCpu->iem.s.offOpcode = offOpcode + 8;
2962# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2963 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2964# else
2965 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2966 pVCpu->iem.s.abOpcode[offOpcode + 1],
2967 pVCpu->iem.s.abOpcode[offOpcode + 2],
2968 pVCpu->iem.s.abOpcode[offOpcode + 3],
2969 pVCpu->iem.s.abOpcode[offOpcode + 4],
2970 pVCpu->iem.s.abOpcode[offOpcode + 5],
2971 pVCpu->iem.s.abOpcode[offOpcode + 6],
2972 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2973# endif
2974 }
2975 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2976# endif
2977}
2978
2979
2980/**
2981 * Fetches the next opcode qword, longjmp on error.
2982 *
2983 * @returns The opcode qword.
2984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2985 */
2986DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2987{
2988# ifdef IEM_WITH_CODE_TLB
2989 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2990 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2991 if (RT_LIKELY( pbBuf != NULL
2992 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2993 {
2994 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2995# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2996 return *(uint64_t const *)&pbBuf[offBuf];
2997# else
2998 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2999 pbBuf[offBuf + 1],
3000 pbBuf[offBuf + 2],
3001 pbBuf[offBuf + 3],
3002 pbBuf[offBuf + 4],
3003 pbBuf[offBuf + 5],
3004 pbBuf[offBuf + 6],
3005 pbBuf[offBuf + 7]);
3006# endif
3007 }
3008# else
3009 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3010 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3011 {
3012 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3013# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3014 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3015# else
3016 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3017 pVCpu->iem.s.abOpcode[offOpcode + 1],
3018 pVCpu->iem.s.abOpcode[offOpcode + 2],
3019 pVCpu->iem.s.abOpcode[offOpcode + 3],
3020 pVCpu->iem.s.abOpcode[offOpcode + 4],
3021 pVCpu->iem.s.abOpcode[offOpcode + 5],
3022 pVCpu->iem.s.abOpcode[offOpcode + 6],
3023 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3024# endif
3025 }
3026# endif
3027 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3028}
3029
3030#endif /* IEM_WITH_SETJMP */
3031
3032/**
3033 * Fetches the next opcode quad word, returns automatically on failure.
3034 *
3035 * @param a_pu64 Where to return the opcode quad word.
3036 * @remark Implicitly references pVCpu.
3037 */
3038#ifndef IEM_WITH_SETJMP
3039# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3040 do \
3041 { \
3042 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3043 if (rcStrict2 != VINF_SUCCESS) \
3044 return rcStrict2; \
3045 } while (0)
3046#else
3047# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3048#endif
3049
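/*
 * Illustrative sketch only, not part of the build: a full 64-bit immediate
 * (e.g. the MOV reg64, imm64 encoding) is fetched with IEM_OPCODE_GET_NEXT_U64,
 * while most instructions with 64-bit operand size only carry an imm32 that is
 * sign-extended, which is what IEM_OPCODE_GET_NEXT_S32_SX_U64 is for.  The
 * helper name is hypothetical.
 *
 *     IEM_STATIC VBOXSTRICTRC iemExampleFetchQwordImm(PVMCPU pVCpu, bool fFullImm64, uint64_t *puImm)
 *     {
 *         if (fFullImm64)
 *             IEM_OPCODE_GET_NEXT_U64(puImm);         // consumes 8 opcode bytes
 *         else
 *             IEM_OPCODE_GET_NEXT_S32_SX_U64(puImm);  // consumes 4 opcode bytes, sign-extended
 *         return VINF_SUCCESS;
 *     }
 */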
3050
3051/** @name Misc Worker Functions.
3052 * @{
3053 */
3054
3055
3056/**
3057 * Validates a new SS segment.
3058 *
3059 * @returns VBox strict status code.
3060 * @param pVCpu The cross context virtual CPU structure of the
3061 * calling thread.
3062 * @param pCtx The CPU context.
3063 * @param   NewSS               The new SS selector.
3064 * @param uCpl The CPL to load the stack for.
3065 * @param pDesc Where to return the descriptor.
3066 */
3067IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3068{
3069 NOREF(pCtx);
3070
3071 /* Null selectors are not allowed (we're not called for dispatching
3072 interrupts with SS=0 in long mode). */
3073 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3074 {
3075 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3076 return iemRaiseTaskSwitchFault0(pVCpu);
3077 }
3078
3079 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3080 if ((NewSS & X86_SEL_RPL) != uCpl)
3081 {
3082 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3083 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3084 }
3085
3086 /*
3087 * Read the descriptor.
3088 */
3089 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3090 if (rcStrict != VINF_SUCCESS)
3091 return rcStrict;
3092
3093 /*
3094 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3095 */
3096 if (!pDesc->Legacy.Gen.u1DescType)
3097 {
3098 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3099 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3100 }
3101
3102 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3103 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3104 {
3105 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3106 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3107 }
3108 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3109 {
3110 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3111 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3112 }
3113
3114 /* Is it there? */
3115 /** @todo testcase: Is this checked before the canonical / limit check below? */
3116 if (!pDesc->Legacy.Gen.u1Present)
3117 {
3118 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3119 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3120 }
3121
3122 return VINF_SUCCESS;
3123}
3124
3125
3126/**
3127 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3128 * not.
3129 *
3130 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3131 * @param a_pCtx The CPU context.
3132 */
3133#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3134# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3135 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3136 ? (a_pCtx)->eflags.u \
3137 : CPUMRawGetEFlags(a_pVCpu) )
3138#else
3139# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3140 ( (a_pCtx)->eflags.u )
3141#endif
3142
3143/**
3144 * Updates the EFLAGS in the correct manner wrt. PATM.
3145 *
3146 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3147 * @param a_pCtx The CPU context.
3148 * @param a_fEfl The new EFLAGS.
3149 */
3150#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3151# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3152 do { \
3153 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3154 (a_pCtx)->eflags.u = (a_fEfl); \
3155 else \
3156 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3157 } while (0)
3158#else
3159# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3160 do { \
3161 (a_pCtx)->eflags.u = (a_fEfl); \
3162 } while (0)
3163#endif
3164
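/*
 * Illustrative sketch only, not part of the build: a typical read-modify-write
 * of the guest EFLAGS goes through the two macros above so the PATM-aware
 * raw-mode path is honoured.  X86_EFL_IF is assumed from the x86 header and the
 * helper name is hypothetical.
 *
 *     IEM_STATIC void iemExampleClearIf(PVMCPU pVCpu, PCPUMCTX pCtx)
 *     {
 *         uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);   // raw-mode aware read
 *         fEfl &= ~X86_EFL_IF;                            // modify
 *         IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);             // raw-mode aware write back
 *     }
 */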
3165
3166/** @} */
3167
3168/** @name Raising Exceptions.
3169 *
3170 * @{
3171 */
3172
3173/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3174 * @{ */
3175/** CPU exception. */
3176#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3177/** External interrupt (from PIC, APIC, whatever). */
3178#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3179/** Software interrupt (int or into, not bound).
3180 * Returns to the following instruction. */
3181#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3182/** Takes an error code. */
3183#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3184/** Takes a CR2. */
3185#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3186/** Generated by the breakpoint instruction. */
3187#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3188/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3189#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3190/** @} */
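/* Example combinations (illustrative): a #PF would typically be raised with
   IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2, while an INT n
   instruction would use IEM_XCPT_FLAGS_T_SOFT_INT. */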
3191
3192
3193/**
3194 * Loads the specified stack far pointer from the TSS.
3195 *
3196 * @returns VBox strict status code.
3197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3198 * @param pCtx The CPU context.
3199 * @param uCpl The CPL to load the stack for.
3200 * @param pSelSS Where to return the new stack segment.
3201 * @param puEsp Where to return the new stack pointer.
3202 */
3203IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3204 PRTSEL pSelSS, uint32_t *puEsp)
3205{
3206 VBOXSTRICTRC rcStrict;
3207 Assert(uCpl < 4);
3208
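    /*
     * The ring stack pointers live at fixed offsets in the TSS:
     *   16-bit TSS: {sp0,ss0} at 0x02, {sp1,ss1} at 0x06, {sp2,ss2} at 0x0a (4 bytes per CPL).
     *   32-bit TSS: {esp0,ss0} at 0x04, {esp1,ss1} at 0x0c, {esp2,ss2} at 0x14 (8 bytes per CPL).
     * Hence the uCpl * 4 + 2 and uCpl * 8 + 4 offset calculations below.
     */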
3209 switch (pCtx->tr.Attr.n.u4Type)
3210 {
3211 /*
3212 * 16-bit TSS (X86TSS16).
3213 */
3214 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
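            /* fall thru */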
3215 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3216 {
3217 uint32_t off = uCpl * 4 + 2;
3218 if (off + 4 <= pCtx->tr.u32Limit)
3219 {
3220 /** @todo check actual access pattern here. */
3221 uint32_t u32Tmp = 0; /* gcc maybe... */
3222 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3223 if (rcStrict == VINF_SUCCESS)
3224 {
3225 *puEsp = RT_LOWORD(u32Tmp);
3226 *pSelSS = RT_HIWORD(u32Tmp);
3227 return VINF_SUCCESS;
3228 }
3229 }
3230 else
3231 {
3232 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3233 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3234 }
3235 break;
3236 }
3237
3238 /*
3239 * 32-bit TSS (X86TSS32).
3240 */
3241 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
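            /* fall thru */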
3242 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3243 {
3244 uint32_t off = uCpl * 8 + 4;
3245 if (off + 7 <= pCtx->tr.u32Limit)
3246 {
3247/** @todo check actual access pattern here. */
3248 uint64_t u64Tmp;
3249 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3250 if (rcStrict == VINF_SUCCESS)
3251 {
3252 *puEsp = u64Tmp & UINT32_MAX;
3253 *pSelSS = (RTSEL)(u64Tmp >> 32);
3254 return VINF_SUCCESS;
3255 }
3256 }
3257 else
3258 {
3259 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3260 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3261 }
3262 break;
3263 }
3264
3265 default:
3266 AssertFailed();
3267 rcStrict = VERR_IEM_IPE_4;
3268 break;
3269 }
3270
3271 *puEsp = 0; /* make gcc happy */
3272 *pSelSS = 0; /* make gcc happy */
3273 return rcStrict;
3274}
3275
3276
3277/**
3278 * Loads the specified stack pointer from the 64-bit TSS.
3279 *
3280 * @returns VBox strict status code.
3281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3282 * @param pCtx The CPU context.
3283 * @param uCpl The CPL to load the stack for.
3284 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3285 * @param puRsp Where to return the new stack pointer.
3286 */
3287IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3288{
3289 Assert(uCpl < 4);
3290 Assert(uIst < 8);
3291 *puRsp = 0; /* make gcc happy */
3292
3293 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3294
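    /* In the 64-bit TSS the rsp0/rsp1/rsp2 fields start at offset 0x04 and ist1..ist7 at offset 0x24,
       each entry being 8 bytes; the RT_OFFSETOF expressions below resolve to those offsets. */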
3295 uint32_t off;
3296 if (uIst)
3297 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3298 else
3299 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3300 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3301 {
3302 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3303 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3304 }
3305
3306 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3307}
3308
3309
3310/**
3311 * Adjust the CPU state according to the exception being raised.
3312 *
3313 * @param pCtx The CPU context.
3314 * @param u8Vector The exception that has been raised.
3315 */
3316DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3317{
3318 switch (u8Vector)
3319 {
3320 case X86_XCPT_DB:
3321 pCtx->dr[7] &= ~X86_DR7_GD;
3322 break;
3323 /** @todo Read the AMD and Intel exception reference... */
3324 }
3325}
3326
3327
3328/**
3329 * Implements exceptions and interrupts for real mode.
3330 *
3331 * @returns VBox strict status code.
3332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3333 * @param pCtx The CPU context.
3334 * @param cbInstr The number of bytes to offset rIP by in the return
3335 * address.
3336 * @param u8Vector The interrupt / exception vector number.
3337 * @param fFlags The flags.
3338 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3339 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3340 */
3341IEM_STATIC VBOXSTRICTRC
3342iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3343 PCPUMCTX pCtx,
3344 uint8_t cbInstr,
3345 uint8_t u8Vector,
3346 uint32_t fFlags,
3347 uint16_t uErr,
3348 uint64_t uCr2)
3349{
3350 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3351 NOREF(uErr); NOREF(uCr2);
3352
3353 /*
3354 * Read the IDT entry.
3355 */
3356 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3357 {
3358 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3359 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3360 }
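    /* A real-mode IDT (IVT) entry is simply a 4 byte far pointer: the offset word followed by the segment word. */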
3361 RTFAR16 Idte;
3362 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3363 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3364 return rcStrict;
3365
3366 /*
3367 * Push the stack frame.
3368 */
3369 uint16_t *pu16Frame;
3370 uint64_t uNewRsp;
3371 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3372 if (rcStrict != VINF_SUCCESS)
3373 return rcStrict;
3374
3375 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3376#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3377 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3378 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3379 fEfl |= UINT16_C(0xf000);
3380#endif
3381 pu16Frame[2] = (uint16_t)fEfl;
3382 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3383 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3384 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3385 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3386 return rcStrict;
3387
3388 /*
3389 * Load the vector address into cs:ip and make exception specific state
3390 * adjustments.
3391 */
3392 pCtx->cs.Sel = Idte.sel;
3393 pCtx->cs.ValidSel = Idte.sel;
3394 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3395 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3396 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3397 pCtx->rip = Idte.off;
3398 fEfl &= ~X86_EFL_IF;
3399 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3400
3401 /** @todo do we actually do this in real mode? */
3402 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3403 iemRaiseXcptAdjustState(pCtx, u8Vector);
3404
3405 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3406}
3407
3408
3409/**
3410 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3411 *
3412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3413 * @param pSReg Pointer to the segment register.
3414 */
3415IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3416{
3417 pSReg->Sel = 0;
3418 pSReg->ValidSel = 0;
3419 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3420 {
3421 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3422 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3423 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3424 }
3425 else
3426 {
3427 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3428 /** @todo check this on AMD-V */
3429 pSReg->u64Base = 0;
3430 pSReg->u32Limit = 0;
3431 }
3432}
3433
3434
3435/**
3436 * Loads a segment selector during a task switch in V8086 mode.
3437 *
3438 * @param pSReg Pointer to the segment register.
3439 * @param uSel The selector value to load.
3440 */
3441IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3442{
3443 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3444 pSReg->Sel = uSel;
3445 pSReg->ValidSel = uSel;
3446 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3447 pSReg->u64Base = uSel << 4;
3448 pSReg->u32Limit = 0xffff;
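    /* 0xf3 = present, DPL=3, S=1, type 3 (accessed read/write data), i.e. the attributes a V8086-mode segment is expected to have. */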
3449 pSReg->Attr.u = 0xf3;
3450}
3451
3452
3453/**
3454 * Loads a NULL data selector into a selector register, both the hidden and
3455 * visible parts, in protected mode.
3456 *
3457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param pSReg Pointer to the segment register.
3459 * @param uRpl The RPL.
3460 */
3461IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3462{
3463 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3464 * data selector in protected mode. */
3465 pSReg->Sel = uRpl;
3466 pSReg->ValidSel = uRpl;
3467 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3468 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3469 {
3470 /* VT-x (Intel 3960x) observed doing something like this. */
3471 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3472 pSReg->u32Limit = UINT32_MAX;
3473 pSReg->u64Base = 0;
3474 }
3475 else
3476 {
3477 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3478 pSReg->u32Limit = 0;
3479 pSReg->u64Base = 0;
3480 }
3481}
3482
3483
3484/**
3485 * Loads a segment selector during a task switch in protected mode.
3486 *
3487 * In this task switch scenario, we would throw \#TS exceptions rather than
3488 * \#GPs.
3489 *
3490 * @returns VBox strict status code.
3491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3492 * @param pSReg Pointer to the segment register.
3493 * @param uSel The new selector value.
3494 *
3495 * @remarks This does _not_ handle CS or SS.
3496 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3497 */
3498IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3499{
3500 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3501
3502 /* Null data selector. */
3503 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3504 {
3505 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3506 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3507 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3508 return VINF_SUCCESS;
3509 }
3510
3511 /* Fetch the descriptor. */
3512 IEMSELDESC Desc;
3513 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3514 if (rcStrict != VINF_SUCCESS)
3515 {
3516 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3517 VBOXSTRICTRC_VAL(rcStrict)));
3518 return rcStrict;
3519 }
3520
3521 /* Must be a data segment or readable code segment. */
3522 if ( !Desc.Legacy.Gen.u1DescType
3523 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3524 {
3525 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3526 Desc.Legacy.Gen.u4Type));
3527 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3528 }
3529
3530 /* Check privileges for data segments and non-conforming code segments. */
3531 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3532 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3533 {
3534 /* The RPL and the new CPL must be less than or equal to the DPL. */
3535 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3536 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3537 {
3538 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3539 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3540 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3541 }
3542 }
3543
3544 /* Is it there? */
3545 if (!Desc.Legacy.Gen.u1Present)
3546 {
3547 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3548 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3549 }
3550
3551 /* The base and limit. */
3552 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3553 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3554
3555 /*
3556 * Ok, everything checked out fine. Now set the accessed bit before
3557 * committing the result into the registers.
3558 */
3559 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3560 {
3561 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3562 if (rcStrict != VINF_SUCCESS)
3563 return rcStrict;
3564 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3565 }
3566
3567 /* Commit */
3568 pSReg->Sel = uSel;
3569 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3570 pSReg->u32Limit = cbLimit;
3571 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3572 pSReg->ValidSel = uSel;
3573 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3574 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3575 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3576
3577 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3578 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3579 return VINF_SUCCESS;
3580}
3581
3582
3583/**
3584 * Performs a task switch.
3585 *
3586 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3587 * caller is responsible for performing the necessary checks (like DPL, TSS
3588 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3589 * reference for JMP, CALL, IRET.
3590 *
3591 * If the task switch is due to a software interrupt or hardware exception,
3592 * the caller is responsible for validating the TSS selector and descriptor. See
3593 * Intel Instruction reference for INT n.
3594 *
3595 * @returns VBox strict status code.
3596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3597 * @param pCtx The CPU context.
3598 * @param enmTaskSwitch What caused this task switch.
3599 * @param uNextEip The EIP effective after the task switch.
3600 * @param fFlags The flags.
3601 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3602 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3603 * @param SelTSS The TSS selector of the new task.
3604 * @param pNewDescTSS Pointer to the new TSS descriptor.
3605 */
3606IEM_STATIC VBOXSTRICTRC
3607iemTaskSwitch(PVMCPU pVCpu,
3608 PCPUMCTX pCtx,
3609 IEMTASKSWITCH enmTaskSwitch,
3610 uint32_t uNextEip,
3611 uint32_t fFlags,
3612 uint16_t uErr,
3613 uint64_t uCr2,
3614 RTSEL SelTSS,
3615 PIEMSELDESC pNewDescTSS)
3616{
3617 Assert(!IEM_IS_REAL_MODE(pVCpu));
3618 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3619
3620 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3621 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3622 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3623 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3624 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3625
3626 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3627 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3628
3629 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3630 fIsNewTSS386, pCtx->eip, uNextEip));
3631
3632 /* Update CR2 in case it's a page-fault. */
3633 /** @todo This should probably be done much earlier in IEM/PGM. See
3634 * @bugref{5653#c49}. */
3635 if (fFlags & IEM_XCPT_FLAGS_CR2)
3636 pCtx->cr2 = uCr2;
3637
3638 /*
3639 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3640 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3641 */
3642 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3643 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3644 if (uNewTSSLimit < uNewTSSLimitMin)
3645 {
3646 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3647 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3648 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3649 }
3650
3651 /*
3652 * Check the current TSS limit. The last written byte to the current TSS during the
3653 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3654 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3655 *
3656 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3657 * end up with smaller than "legal" TSS limits.
3658 */
3659 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3660 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3661 if (uCurTSSLimit < uCurTSSLimitMin)
3662 {
3663 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3664 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3665 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3666 }
3667
3668 /*
3669 * Verify that the new TSS can be accessed and map it. Map only the required contents
3670 * and not the entire TSS.
3671 */
3672 void *pvNewTSS;
3673 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3674 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3675 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3676 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3677 * not perform correct translation if this happens. See Intel spec. 7.2.1
3678 * "Task-State Segment" */
3679 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3680 if (rcStrict != VINF_SUCCESS)
3681 {
3682 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3683 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3684 return rcStrict;
3685 }
3686
3687 /*
3688 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3689 */
3690 uint32_t u32EFlags = pCtx->eflags.u32;
3691 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3692 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3693 {
3694 PX86DESC pDescCurTSS;
3695 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3696 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3697 if (rcStrict != VINF_SUCCESS)
3698 {
3699 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3700 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3701 return rcStrict;
3702 }
3703
3704 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3705 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3706 if (rcStrict != VINF_SUCCESS)
3707 {
3708 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3709 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3710 return rcStrict;
3711 }
3712
3713 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3714 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3715 {
3716 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3717 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3718 u32EFlags &= ~X86_EFL_NT;
3719 }
3720 }
3721
3722 /*
3723 * Save the CPU state into the current TSS.
3724 */
3725 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3726 if (GCPtrNewTSS == GCPtrCurTSS)
3727 {
3728 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3729 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3730 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3731 }
3732 if (fIsNewTSS386)
3733 {
3734 /*
3735 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3736 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3737 */
3738 void *pvCurTSS32;
3739 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3740 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3741 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3742 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3743 if (rcStrict != VINF_SUCCESS)
3744 {
3745 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3746 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3747 return rcStrict;
3748 }
3749
3750 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
3751 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3752 pCurTSS32->eip = uNextEip;
3753 pCurTSS32->eflags = u32EFlags;
3754 pCurTSS32->eax = pCtx->eax;
3755 pCurTSS32->ecx = pCtx->ecx;
3756 pCurTSS32->edx = pCtx->edx;
3757 pCurTSS32->ebx = pCtx->ebx;
3758 pCurTSS32->esp = pCtx->esp;
3759 pCurTSS32->ebp = pCtx->ebp;
3760 pCurTSS32->esi = pCtx->esi;
3761 pCurTSS32->edi = pCtx->edi;
3762 pCurTSS32->es = pCtx->es.Sel;
3763 pCurTSS32->cs = pCtx->cs.Sel;
3764 pCurTSS32->ss = pCtx->ss.Sel;
3765 pCurTSS32->ds = pCtx->ds.Sel;
3766 pCurTSS32->fs = pCtx->fs.Sel;
3767 pCurTSS32->gs = pCtx->gs.Sel;
3768
3769 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3770 if (rcStrict != VINF_SUCCESS)
3771 {
3772 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3773 VBOXSTRICTRC_VAL(rcStrict)));
3774 return rcStrict;
3775 }
3776 }
3777 else
3778 {
3779 /*
3780 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3781 */
3782 void *pvCurTSS16;
3783 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3784 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3785 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3786 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3787 if (rcStrict != VINF_SUCCESS)
3788 {
3789 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3790 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3791 return rcStrict;
3792 }
3793
3794 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
3795 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3796 pCurTSS16->ip = uNextEip;
3797 pCurTSS16->flags = u32EFlags;
3798 pCurTSS16->ax = pCtx->ax;
3799 pCurTSS16->cx = pCtx->cx;
3800 pCurTSS16->dx = pCtx->dx;
3801 pCurTSS16->bx = pCtx->bx;
3802 pCurTSS16->sp = pCtx->sp;
3803 pCurTSS16->bp = pCtx->bp;
3804 pCurTSS16->si = pCtx->si;
3805 pCurTSS16->di = pCtx->di;
3806 pCurTSS16->es = pCtx->es.Sel;
3807 pCurTSS16->cs = pCtx->cs.Sel;
3808 pCurTSS16->ss = pCtx->ss.Sel;
3809 pCurTSS16->ds = pCtx->ds.Sel;
3810
3811 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3812 if (rcStrict != VINF_SUCCESS)
3813 {
3814 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3815 VBOXSTRICTRC_VAL(rcStrict)));
3816 return rcStrict;
3817 }
3818 }
3819
3820 /*
3821 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3822 */
3823 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3824 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3825 {
3826 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3827 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3828 pNewTSS->selPrev = pCtx->tr.Sel;
3829 }
3830
3831 /*
3832 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
3833 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3834 */
3835 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3836 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3837 bool fNewDebugTrap;
3838 if (fIsNewTSS386)
3839 {
3840 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3841 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3842 uNewEip = pNewTSS32->eip;
3843 uNewEflags = pNewTSS32->eflags;
3844 uNewEax = pNewTSS32->eax;
3845 uNewEcx = pNewTSS32->ecx;
3846 uNewEdx = pNewTSS32->edx;
3847 uNewEbx = pNewTSS32->ebx;
3848 uNewEsp = pNewTSS32->esp;
3849 uNewEbp = pNewTSS32->ebp;
3850 uNewEsi = pNewTSS32->esi;
3851 uNewEdi = pNewTSS32->edi;
3852 uNewES = pNewTSS32->es;
3853 uNewCS = pNewTSS32->cs;
3854 uNewSS = pNewTSS32->ss;
3855 uNewDS = pNewTSS32->ds;
3856 uNewFS = pNewTSS32->fs;
3857 uNewGS = pNewTSS32->gs;
3858 uNewLdt = pNewTSS32->selLdt;
3859 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3860 }
3861 else
3862 {
3863 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3864 uNewCr3 = 0;
3865 uNewEip = pNewTSS16->ip;
3866 uNewEflags = pNewTSS16->flags;
3867 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3868 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3869 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3870 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3871 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3872 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3873 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3874 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3875 uNewES = pNewTSS16->es;
3876 uNewCS = pNewTSS16->cs;
3877 uNewSS = pNewTSS16->ss;
3878 uNewDS = pNewTSS16->ds;
3879 uNewFS = 0;
3880 uNewGS = 0;
3881 uNewLdt = pNewTSS16->selLdt;
3882 fNewDebugTrap = false;
3883 }
3884
3885 if (GCPtrNewTSS == GCPtrCurTSS)
3886 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3887 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3888
3889 /*
3890 * We're done accessing the new TSS.
3891 */
3892 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3893 if (rcStrict != VINF_SUCCESS)
3894 {
3895 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3896 return rcStrict;
3897 }
3898
3899 /*
3900 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3901 */
3902 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3903 {
3904 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3905 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3906 if (rcStrict != VINF_SUCCESS)
3907 {
3908 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3909 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3910 return rcStrict;
3911 }
3912
3913 /* Check that the descriptor indicates the new TSS is available (not busy). */
3914 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3915 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3916 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3917
3918 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3919 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3920 if (rcStrict != VINF_SUCCESS)
3921 {
3922 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3923 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3924 return rcStrict;
3925 }
3926 }
3927
3928 /*
3929 * From this point on, we're technically in the new task. We will defer exceptions until the
3930 * completion of the task switch, but deliver them before executing any instructions in the new task.
3931 */
3932 pCtx->tr.Sel = SelTSS;
3933 pCtx->tr.ValidSel = SelTSS;
3934 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3935 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3936 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3937 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3938 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3939
3940 /* Set the busy bit in TR. */
3941 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3942 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3943 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3944 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3945 {
3946 uNewEflags |= X86_EFL_NT;
3947 }
3948
3949 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3950 pCtx->cr0 |= X86_CR0_TS;
3951 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3952
3953 pCtx->eip = uNewEip;
3954 pCtx->eax = uNewEax;
3955 pCtx->ecx = uNewEcx;
3956 pCtx->edx = uNewEdx;
3957 pCtx->ebx = uNewEbx;
3958 pCtx->esp = uNewEsp;
3959 pCtx->ebp = uNewEbp;
3960 pCtx->esi = uNewEsi;
3961 pCtx->edi = uNewEdi;
3962
3963 uNewEflags &= X86_EFL_LIVE_MASK;
3964 uNewEflags |= X86_EFL_RA1_MASK;
3965 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3966
3967 /*
3968 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3969 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3970 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3971 */
3972 pCtx->es.Sel = uNewES;
3973 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3974
3975 pCtx->cs.Sel = uNewCS;
3976 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3977
3978 pCtx->ss.Sel = uNewSS;
3979 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3980
3981 pCtx->ds.Sel = uNewDS;
3982 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3983
3984 pCtx->fs.Sel = uNewFS;
3985 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3986
3987 pCtx->gs.Sel = uNewGS;
3988 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3989 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3990
3991 pCtx->ldtr.Sel = uNewLdt;
3992 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3993 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3994 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3995
3996 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3997 {
3998 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3999 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4000 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4001 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4002 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4003 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4004 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4005 }
4006
4007 /*
4008 * Switch CR3 for the new task.
4009 */
4010 if ( fIsNewTSS386
4011 && (pCtx->cr0 & X86_CR0_PG))
4012 {
4013 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4014 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4015 {
4016 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4017 AssertRCSuccessReturn(rc, rc);
4018 }
4019 else
4020 pCtx->cr3 = uNewCr3;
4021
4022 /* Inform PGM. */
4023 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4024 {
4025 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4026 AssertRCReturn(rc, rc);
4027 /* ignore informational status codes */
4028 }
4029 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4030 }
4031
4032 /*
4033 * Switch LDTR for the new task.
4034 */
4035 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4036 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4037 else
4038 {
4039 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4040
4041 IEMSELDESC DescNewLdt;
4042 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4043 if (rcStrict != VINF_SUCCESS)
4044 {
4045 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4046 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4047 return rcStrict;
4048 }
4049 if ( !DescNewLdt.Legacy.Gen.u1Present
4050 || DescNewLdt.Legacy.Gen.u1DescType
4051 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4052 {
4053 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4054 uNewLdt, DescNewLdt.Legacy.u));
4055 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4056 }
4057
4058 pCtx->ldtr.ValidSel = uNewLdt;
4059 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4060 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4061 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4062 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4063 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4064 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4066 }
4067
4068 IEMSELDESC DescSS;
4069 if (IEM_IS_V86_MODE(pVCpu))
4070 {
4071 pVCpu->iem.s.uCpl = 3;
4072 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4073 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4074 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4075 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4076 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4077 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4078
4079 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4080 DescSS.Legacy.u = 0;
4081 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4082 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4083 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4084 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4085 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4086 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4087 DescSS.Legacy.Gen.u2Dpl = 3;
4088 }
4089 else
4090 {
4091 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4092
4093 /*
4094 * Load the stack segment for the new task.
4095 */
4096 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4097 {
4098 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4099 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4100 }
4101
4102 /* Fetch the descriptor. */
4103 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4104 if (rcStrict != VINF_SUCCESS)
4105 {
4106 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4107 VBOXSTRICTRC_VAL(rcStrict)));
4108 return rcStrict;
4109 }
4110
4111 /* SS must be a data segment and writable. */
4112 if ( !DescSS.Legacy.Gen.u1DescType
4113 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4114 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4115 {
4116 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4117 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4118 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4119 }
4120
4121 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4122 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4123 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4124 {
4125 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4126 uNewCpl));
4127 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4128 }
4129
4130 /* Is it there? */
4131 if (!DescSS.Legacy.Gen.u1Present)
4132 {
4133 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4134 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4135 }
4136
4137 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4138 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4139
4140 /* Set the accessed bit before committing the result into SS. */
4141 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4142 {
4143 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4144 if (rcStrict != VINF_SUCCESS)
4145 return rcStrict;
4146 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4147 }
4148
4149 /* Commit SS. */
4150 pCtx->ss.Sel = uNewSS;
4151 pCtx->ss.ValidSel = uNewSS;
4152 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4153 pCtx->ss.u32Limit = cbLimit;
4154 pCtx->ss.u64Base = u64Base;
4155 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4156 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4157
4158 /* CPL has changed, update IEM before loading rest of segments. */
4159 pVCpu->iem.s.uCpl = uNewCpl;
4160
4161 /*
4162 * Load the data segments for the new task.
4163 */
4164 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4165 if (rcStrict != VINF_SUCCESS)
4166 return rcStrict;
4167 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4168 if (rcStrict != VINF_SUCCESS)
4169 return rcStrict;
4170 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4171 if (rcStrict != VINF_SUCCESS)
4172 return rcStrict;
4173 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4174 if (rcStrict != VINF_SUCCESS)
4175 return rcStrict;
4176
4177 /*
4178 * Load the code segment for the new task.
4179 */
4180 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4181 {
4182 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4183 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4184 }
4185
4186 /* Fetch the descriptor. */
4187 IEMSELDESC DescCS;
4188 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4189 if (rcStrict != VINF_SUCCESS)
4190 {
4191 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4192 return rcStrict;
4193 }
4194
4195 /* CS must be a code segment. */
4196 if ( !DescCS.Legacy.Gen.u1DescType
4197 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4198 {
4199 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4200 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4201 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4202 }
4203
4204 /* For conforming CS, DPL must be less than or equal to the RPL. */
4205 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4206 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4207 {
4208 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4209 DescCS.Legacy.Gen.u2Dpl));
4210 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4211 }
4212
4213 /* For non-conforming CS, DPL must match RPL. */
4214 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4215 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4216 {
4217 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4218 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4219 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4220 }
4221
4222 /* Is it there? */
4223 if (!DescCS.Legacy.Gen.u1Present)
4224 {
4225 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4226 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4227 }
4228
4229 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4230 u64Base = X86DESC_BASE(&DescCS.Legacy);
4231
4232 /* Set the accessed bit before committing the result into CS. */
4233 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4234 {
4235 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4236 if (rcStrict != VINF_SUCCESS)
4237 return rcStrict;
4238 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4239 }
4240
4241 /* Commit CS. */
4242 pCtx->cs.Sel = uNewCS;
4243 pCtx->cs.ValidSel = uNewCS;
4244 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4245 pCtx->cs.u32Limit = cbLimit;
4246 pCtx->cs.u64Base = u64Base;
4247 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4249 }
4250
4251 /** @todo Debug trap. */
4252 if (fIsNewTSS386 && fNewDebugTrap)
4253 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4254
4255 /*
4256 * Construct the error code masks based on what caused this task switch.
4257 * See Intel Instruction reference for INT.
4258 */
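    /* uExt ends up as the EXT bit (bit 0) of the error codes raised below: set when the task switch was
       triggered by an external event (hardware interrupt or CPU exception), clear for software interrupts. */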
4259 uint16_t uExt;
4260 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4261 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4262 {
4263 uExt = 1;
4264 }
4265 else
4266 uExt = 0;
4267
4268 /*
4269 * Push any error code on to the new stack.
4270 */
4271 if (fFlags & IEM_XCPT_FLAGS_ERR)
4272 {
4273 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4274 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4275 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4276
4277 /* Check that there is sufficient space on the stack. */
4278 /** @todo Factor out segment limit checking for normal/expand down segments
4279 * into a separate function. */
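        /* Expand-up stack: the frame bytes [esp - cbStackFrame, esp - 1] must lie within the limit and esp
           must not underflow. Expand-down stack: only offsets above the limit are valid, bounded by 64K or
           4G depending on the D bit. */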
4280 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4281 {
4282 if ( pCtx->esp - 1 > cbLimitSS
4283 || pCtx->esp < cbStackFrame)
4284 {
4285 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4286 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4287 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4288 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4289 }
4290 }
4291 else
4292 {
4293 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4294 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4295 {
4296 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4297 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4298 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4299 }
4300 }
4301
4302
4303 if (fIsNewTSS386)
4304 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4305 else
4306 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4307 if (rcStrict != VINF_SUCCESS)
4308 {
4309 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4310 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4311 return rcStrict;
4312 }
4313 }
4314
4315 /* Check the new EIP against the new CS limit. */
4316 if (pCtx->eip > pCtx->cs.u32Limit)
4317 {
4318 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4319 pCtx->eip, pCtx->cs.u32Limit));
4320 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4321 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4322 }
4323
4324 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4325 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4326}
4327
4328
4329/**
4330 * Implements exceptions and interrupts for protected mode.
4331 *
4332 * @returns VBox strict status code.
4333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4334 * @param pCtx The CPU context.
4335 * @param cbInstr The number of bytes to offset rIP by in the return
4336 * address.
4337 * @param u8Vector The interrupt / exception vector number.
4338 * @param fFlags The flags.
4339 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4340 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4341 */
4342IEM_STATIC VBOXSTRICTRC
4343iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4344 PCPUMCTX pCtx,
4345 uint8_t cbInstr,
4346 uint8_t u8Vector,
4347 uint32_t fFlags,
4348 uint16_t uErr,
4349 uint64_t uCr2)
4350{
4351 /*
4352 * Read the IDT entry.
4353 */
4354 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4355 {
4356 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4357 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4358 }
4359 X86DESC Idte;
4360 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4361 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4362 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4363 return rcStrict;
4364 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4365 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4366 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4367
4368 /*
4369 * Check the descriptor type, DPL and such.
4370 * ASSUMES this is done in the same order as described for call-gate calls.
4371 */
4372 if (Idte.Gate.u1DescType)
4373 {
4374 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4375 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4376 }
4377 bool fTaskGate = false;
4378 uint8_t f32BitGate = true;
4379 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4380 switch (Idte.Gate.u4Type)
4381 {
4382 case X86_SEL_TYPE_SYS_UNDEFINED:
4383 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4384 case X86_SEL_TYPE_SYS_LDT:
4385 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4386 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4387 case X86_SEL_TYPE_SYS_UNDEFINED2:
4388 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4389 case X86_SEL_TYPE_SYS_UNDEFINED3:
4390 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4391 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4392 case X86_SEL_TYPE_SYS_UNDEFINED4:
4393 {
4394 /** @todo check what actually happens when the type is wrong...
4395 * esp. call gates. */
4396 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4397 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4398 }
4399
4400 case X86_SEL_TYPE_SYS_286_INT_GATE:
4401 f32BitGate = false;
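            /* fall thru */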
4402 case X86_SEL_TYPE_SYS_386_INT_GATE:
4403 fEflToClear |= X86_EFL_IF;
4404 break;
4405
4406 case X86_SEL_TYPE_SYS_TASK_GATE:
4407 fTaskGate = true;
4408#ifndef IEM_IMPLEMENTS_TASKSWITCH
4409 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4410#endif
4411 break;
4412
4413 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4414 f32BitGate = false;
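            /* fall thru */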
4415 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4416 break;
4417
4418 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4419 }
4420
4421 /* Check DPL against CPL if applicable. */
4422 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4423 {
4424 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4425 {
4426 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4427 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4428 }
4429 }
4430
4431 /* Is it there? */
4432 if (!Idte.Gate.u1Present)
4433 {
4434 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4435 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4436 }
4437
4438 /* Is it a task-gate? */
4439 if (fTaskGate)
4440 {
4441 /*
4442 * Construct the error code masks based on what caused this task switch.
4443 * See Intel Instruction reference for INT.
4444 */
4445 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4446 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4447 RTSEL SelTSS = Idte.Gate.u16Sel;
4448
4449 /*
4450 * Fetch the TSS descriptor in the GDT.
4451 */
4452 IEMSELDESC DescTSS;
4453 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4454 if (rcStrict != VINF_SUCCESS)
4455 {
4456 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4457 VBOXSTRICTRC_VAL(rcStrict)));
4458 return rcStrict;
4459 }
4460
4461 /* The TSS descriptor must be a system segment and be available (not busy). */
4462 if ( DescTSS.Legacy.Gen.u1DescType
4463 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4464 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4465 {
4466 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4467 u8Vector, SelTSS, DescTSS.Legacy.au64));
4468 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4469 }
4470
4471 /* The TSS must be present. */
4472 if (!DescTSS.Legacy.Gen.u1Present)
4473 {
4474 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4475 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4476 }
4477
4478 /* Do the actual task switch. */
4479 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4480 }
4481
4482 /* A null CS is bad. */
4483 RTSEL NewCS = Idte.Gate.u16Sel;
4484 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4485 {
4486 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4487 return iemRaiseGeneralProtectionFault0(pVCpu);
4488 }
4489
4490 /* Fetch the descriptor for the new CS. */
4491 IEMSELDESC DescCS;
4492 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4493 if (rcStrict != VINF_SUCCESS)
4494 {
4495 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4496 return rcStrict;
4497 }
4498
4499 /* Must be a code segment. */
4500 if (!DescCS.Legacy.Gen.u1DescType)
4501 {
4502 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4503 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4504 }
4505 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4506 {
4507 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4508 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4509 }
4510
4511 /* Don't allow lowering the privilege level. */
4512 /** @todo Does the lowering of privileges apply to software interrupts
4513 * only? This has bearings on the more-privileged or
4514 * same-privilege stack behavior further down. A testcase would
4515 * be nice. */
4516 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4517 {
4518 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4519 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4520 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4521 }
4522
4523 /* Make sure the selector is present. */
4524 if (!DescCS.Legacy.Gen.u1Present)
4525 {
4526 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4527 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4528 }
4529
4530 /* Check the new EIP against the new CS limit. */
4531 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4532 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4533 ? Idte.Gate.u16OffsetLow
4534 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4535 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4536 if (uNewEip > cbLimitCS)
4537 {
4538 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4539 u8Vector, uNewEip, cbLimitCS, NewCS));
4540 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4541 }
4542 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4543
4544 /* Calc the flag image to push. */
4545 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4546 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4547 fEfl &= ~X86_EFL_RF;
4548 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4549 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4550
4551 /* From V8086 mode only go to CPL 0. */
4552 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4553 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4554 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4555 {
4556 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4557 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4558 }
4559
4560 /*
4561 * If the privilege level changes, we need to get a new stack from the TSS.
4562 * This in turns means validating the new SS and ESP...
4563 */
4564 if (uNewCpl != pVCpu->iem.s.uCpl)
4565 {
4566 RTSEL NewSS;
4567 uint32_t uNewEsp;
4568 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4569 if (rcStrict != VINF_SUCCESS)
4570 return rcStrict;
4571
4572 IEMSELDESC DescSS;
4573 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4574 if (rcStrict != VINF_SUCCESS)
4575 return rcStrict;
4576 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4577 if (!DescSS.Legacy.Gen.u1DefBig)
4578 {
4579 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4580 uNewEsp = (uint16_t)uNewEsp;
4581 }
4582
4583 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4584
4585 /* Check that there is sufficient space for the stack frame. */
4586 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4587 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4588 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4589 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
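        /* I.e. 5 frame entries (EIP, CS, EFLAGS, ESP, SS), or 9 when interrupting V8086 mode (plus ES, DS,
           FS, GS), plus one more for an error code; each entry is 2 bytes for a 16-bit gate and 4 bytes for
           a 32-bit one. */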
4590
4591 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4592 {
4593 if ( uNewEsp - 1 > cbLimitSS
4594 || uNewEsp < cbStackFrame)
4595 {
4596 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4597 u8Vector, NewSS, uNewEsp, cbStackFrame));
4598 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4599 }
4600 }
4601 else
4602 {
4603 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4604 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4605 {
4606 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4607 u8Vector, NewSS, uNewEsp, cbStackFrame));
4608 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4609 }
4610 }
4611
4612 /*
4613 * Start making changes.
4614 */
4615
4616 /* Set the new CPL so that stack accesses use it. */
4617 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4618 pVCpu->iem.s.uCpl = uNewCpl;
4619
4620 /* Create the stack frame. */
4621 RTPTRUNION uStackFrame;
4622 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4623 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4624 if (rcStrict != VINF_SUCCESS)
4625 return rcStrict;
4626 void * const pvStackFrame = uStackFrame.pv;
4627 if (f32BitGate)
4628 {
4629 if (fFlags & IEM_XCPT_FLAGS_ERR)
4630 *uStackFrame.pu32++ = uErr;
4631 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4632 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4633 uStackFrame.pu32[2] = fEfl;
4634 uStackFrame.pu32[3] = pCtx->esp;
4635 uStackFrame.pu32[4] = pCtx->ss.Sel;
4636 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4637 if (fEfl & X86_EFL_VM)
4638 {
4639 uStackFrame.pu32[1] = pCtx->cs.Sel;
4640 uStackFrame.pu32[5] = pCtx->es.Sel;
4641 uStackFrame.pu32[6] = pCtx->ds.Sel;
4642 uStackFrame.pu32[7] = pCtx->fs.Sel;
4643 uStackFrame.pu32[8] = pCtx->gs.Sel;
4644 }
4645 }
4646 else
4647 {
4648 if (fFlags & IEM_XCPT_FLAGS_ERR)
4649 *uStackFrame.pu16++ = uErr;
4650 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4651 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4652 uStackFrame.pu16[2] = fEfl;
4653 uStackFrame.pu16[3] = pCtx->sp;
4654 uStackFrame.pu16[4] = pCtx->ss.Sel;
4655 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4656 if (fEfl & X86_EFL_VM)
4657 {
4658 uStackFrame.pu16[1] = pCtx->cs.Sel;
4659 uStackFrame.pu16[5] = pCtx->es.Sel;
4660 uStackFrame.pu16[6] = pCtx->ds.Sel;
4661 uStackFrame.pu16[7] = pCtx->fs.Sel;
4662 uStackFrame.pu16[8] = pCtx->gs.Sel;
4663 }
4664 }
4665 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4666 if (rcStrict != VINF_SUCCESS)
4667 return rcStrict;
4668
4669 /* Mark the selectors 'accessed' (hope this is the correct time). */
4670 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4671 * after pushing the stack frame? (Write protect the gdt + stack to
4672 * find out.) */
4673 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4674 {
4675 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4676 if (rcStrict != VINF_SUCCESS)
4677 return rcStrict;
4678 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4679 }
4680
4681 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4682 {
4683 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4684 if (rcStrict != VINF_SUCCESS)
4685 return rcStrict;
4686 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4687 }
4688
4689 /*
4690 * Start committing the register changes (joins with the DPL=CPL branch).
4691 */
4692 pCtx->ss.Sel = NewSS;
4693 pCtx->ss.ValidSel = NewSS;
4694 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4695 pCtx->ss.u32Limit = cbLimitSS;
4696 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4697 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4698 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4699 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4700 * SP is loaded).
4701 * Need to check the other combinations too:
4702 * - 16-bit TSS, 32-bit handler
4703 * - 32-bit TSS, 16-bit handler */
4704 if (!pCtx->ss.Attr.n.u1DefBig)
4705 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4706 else
4707 pCtx->rsp = uNewEsp - cbStackFrame;
4708
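     /* When the exception/interrupt was raised in V8086 mode, DS, ES, FS and
        GS are loaded with null selectors so the protected mode handler gets a
        clean slate (their old values were pushed on the new stack above). */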
4709 if (fEfl & X86_EFL_VM)
4710 {
4711 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4712 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4713 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4714 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4715 }
4716 }
4717 /*
4718 * Same privilege, no stack change and smaller stack frame.
4719 */
4720 else
4721 {
4722 uint64_t uNewRsp;
4723 RTPTRUNION uStackFrame;
4724 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
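     /* Same-privilege frame: EFLAGS, CS, EIP and an optional error code,
        i.e. 3 or 4 slots of 2 bytes (16-bit gate) or 4 bytes (32-bit gate). */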
4725 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4726 if (rcStrict != VINF_SUCCESS)
4727 return rcStrict;
4728 void * const pvStackFrame = uStackFrame.pv;
4729
4730 if (f32BitGate)
4731 {
4732 if (fFlags & IEM_XCPT_FLAGS_ERR)
4733 *uStackFrame.pu32++ = uErr;
4734 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4735 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4736 uStackFrame.pu32[2] = fEfl;
4737 }
4738 else
4739 {
4740 if (fFlags & IEM_XCPT_FLAGS_ERR)
4741 *uStackFrame.pu16++ = uErr;
4742 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4743 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4744 uStackFrame.pu16[2] = fEfl;
4745 }
4746 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4747 if (rcStrict != VINF_SUCCESS)
4748 return rcStrict;
4749
4750 /* Mark the CS selector as 'accessed'. */
4751 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4752 {
4753 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4754 if (rcStrict != VINF_SUCCESS)
4755 return rcStrict;
4756 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4757 }
4758
4759 /*
4760 * Start committing the register changes (joins with the other branch).
4761 */
4762 pCtx->rsp = uNewRsp;
4763 }
4764
4765 /* ... register committing continues. */
4766 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4767 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4768 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4769 pCtx->cs.u32Limit = cbLimitCS;
4770 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4771 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4772
4773 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4774 fEfl &= ~fEflToClear;
4775 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4776
4777 if (fFlags & IEM_XCPT_FLAGS_CR2)
4778 pCtx->cr2 = uCr2;
4779
4780 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4781 iemRaiseXcptAdjustState(pCtx, u8Vector);
4782
4783 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4784}
4785
4786
4787/**
4788 * Implements exceptions and interrupts for long mode.
4789 *
4790 * @returns VBox strict status code.
4791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4792 * @param pCtx The CPU context.
4793 * @param cbInstr The number of bytes to offset rIP by in the return
4794 * address.
4795 * @param u8Vector The interrupt / exception vector number.
4796 * @param fFlags The flags.
4797 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4798 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4799 */
4800IEM_STATIC VBOXSTRICTRC
4801iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4802 PCPUMCTX pCtx,
4803 uint8_t cbInstr,
4804 uint8_t u8Vector,
4805 uint32_t fFlags,
4806 uint16_t uErr,
4807 uint64_t uCr2)
4808{
4809 /*
4810 * Read the IDT entry.
4811 */
4812 uint16_t offIdt = (uint16_t)u8Vector << 4;
4813 if (pCtx->idtr.cbIdt < offIdt + 7)
4814 {
4815 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4816 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4817 }
4818 X86DESC64 Idte;
4819 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4820 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4821 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4822 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4823 return rcStrict;
4824 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4825 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4826 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
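     /* Note: long mode IDT entries are 16 bytes each (hence the vector << 4
        above and the two 8-byte reads), holding a 64-bit target offset, the
        target CS selector and an optional IST index. */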
4827
4828 /*
4829 * Check the descriptor type, DPL and such.
4830 * ASSUMES this is done in the same order as described for call-gate calls.
4831 */
4832 if (Idte.Gate.u1DescType)
4833 {
4834 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4835 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4836 }
4837 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4838 switch (Idte.Gate.u4Type)
4839 {
4840 case AMD64_SEL_TYPE_SYS_INT_GATE:
4841 fEflToClear |= X86_EFL_IF;
4842 break;
4843 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4844 break;
4845
4846 default:
4847 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4848 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4849 }
4850
4851 /* Check DPL against CPL if applicable. */
4852 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4853 {
4854 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4855 {
4856 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4857 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4858 }
4859 }
4860
4861 /* Is it there? */
4862 if (!Idte.Gate.u1Present)
4863 {
4864 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4865 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4866 }
4867
4868 /* A null CS is bad. */
4869 RTSEL NewCS = Idte.Gate.u16Sel;
4870 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4871 {
4872 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4873 return iemRaiseGeneralProtectionFault0(pVCpu);
4874 }
4875
4876 /* Fetch the descriptor for the new CS. */
4877 IEMSELDESC DescCS;
4878 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4879 if (rcStrict != VINF_SUCCESS)
4880 {
4881 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4882 return rcStrict;
4883 }
4884
4885 /* Must be a 64-bit code segment. */
4886 if (!DescCS.Long.Gen.u1DescType)
4887 {
4888 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4889 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4890 }
4891 if ( !DescCS.Long.Gen.u1Long
4892 || DescCS.Long.Gen.u1DefBig
4893 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4894 {
4895 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4896 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4897 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4898 }
4899
4900 /* Don't allow lowering the privilege level. For non-conforming CS
4901 selectors, the CS.DPL sets the privilege level the trap/interrupt
4902 handler runs at. For conforming CS selectors, the CPL remains
4903 unchanged, but the CS.DPL must be <= CPL. */
4904 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4905 * when CPU in Ring-0. Result \#GP? */
4906 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4907 {
4908 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4909 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4910 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4911 }
4912
4913
4914 /* Make sure the selector is present. */
4915 if (!DescCS.Legacy.Gen.u1Present)
4916 {
4917 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4918 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4919 }
4920
4921 /* Check that the new RIP is canonical. */
4922 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4923 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4924 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4925 if (!IEM_IS_CANONICAL(uNewRip))
4926 {
4927 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4928 return iemRaiseGeneralProtectionFault0(pVCpu);
4929 }
4930
4931 /*
4932 * If the privilege level changes or if the IST isn't zero, we need to get
4933 * a new stack from the TSS.
4934 */
4935 uint64_t uNewRsp;
4936 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4937 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4938 if ( uNewCpl != pVCpu->iem.s.uCpl
4939 || Idte.Gate.u3IST != 0)
4940 {
4941 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4942 if (rcStrict != VINF_SUCCESS)
4943 return rcStrict;
4944 }
4945 else
4946 uNewRsp = pCtx->rsp;
4947 uNewRsp &= ~(uint64_t)0xf;
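     /* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte
        boundary before pushing the interrupt frame, whether or not a stack
        switch took place. */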
4948
4949 /*
4950 * Calc the flag image to push.
4951 */
4952 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4953 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4954 fEfl &= ~X86_EFL_RF;
4955 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4956 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4957
4958 /*
4959 * Start making changes.
4960 */
4961 /* Set the new CPL so that stack accesses use it. */
4962 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4963 pVCpu->iem.s.uCpl = uNewCpl;
4964
4965 /* Create the stack frame. */
4966 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
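     /* The 64-bit frame is always five qwords - SS, RSP, RFLAGS, CS, RIP -
        plus an optional error code qword; SS:RSP is pushed even when the
        privilege level does not change. */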
4967 RTPTRUNION uStackFrame;
4968 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4969 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4970 if (rcStrict != VINF_SUCCESS)
4971 return rcStrict;
4972 void * const pvStackFrame = uStackFrame.pv;
4973
4974 if (fFlags & IEM_XCPT_FLAGS_ERR)
4975 *uStackFrame.pu64++ = uErr;
4976 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4977 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4978 uStackFrame.pu64[2] = fEfl;
4979 uStackFrame.pu64[3] = pCtx->rsp;
4980 uStackFrame.pu64[4] = pCtx->ss.Sel;
4981 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4982 if (rcStrict != VINF_SUCCESS)
4983 return rcStrict;
4984
4985 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
4986 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4987 * after pushing the stack frame? (Write protect the gdt + stack to
4988 * find out.) */
4989 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4990 {
4991 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4992 if (rcStrict != VINF_SUCCESS)
4993 return rcStrict;
4994 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4995 }
4996
4997 /*
4998 * Start committing the register changes.
4999 */
5000 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5001 * hidden registers when interrupting 32-bit or 16-bit code! */
5002 if (uNewCpl != uOldCpl)
5003 {
5004 pCtx->ss.Sel = 0 | uNewCpl;
5005 pCtx->ss.ValidSel = 0 | uNewCpl;
5006 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5007 pCtx->ss.u32Limit = UINT32_MAX;
5008 pCtx->ss.u64Base = 0;
5009 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5010 }
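     /* Note: loading SS with a null selector (RPL = new CPL) is the
        architected behaviour for a 64-bit mode stack switch; the handler runs
        with a flat stack and no SS descriptor is consulted. */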
5011 pCtx->rsp = uNewRsp - cbStackFrame;
5012 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5013 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5014 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5015 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5016 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5017 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5018 pCtx->rip = uNewRip;
5019
5020 fEfl &= ~fEflToClear;
5021 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5022
5023 if (fFlags & IEM_XCPT_FLAGS_CR2)
5024 pCtx->cr2 = uCr2;
5025
5026 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5027 iemRaiseXcptAdjustState(pCtx, u8Vector);
5028
5029 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5030}
5031
5032
5033/**
5034 * Implements exceptions and interrupts.
5035 *
5036 * All exceptions and interrupts go through this function!
5037 *
5038 * @returns VBox strict status code.
5039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5040 * @param cbInstr The number of bytes to offset rIP by in the return
5041 * address.
5042 * @param u8Vector The interrupt / exception vector number.
5043 * @param fFlags The flags.
5044 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5045 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5046 */
5047DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5048iemRaiseXcptOrInt(PVMCPU pVCpu,
5049 uint8_t cbInstr,
5050 uint8_t u8Vector,
5051 uint32_t fFlags,
5052 uint16_t uErr,
5053 uint64_t uCr2)
5054{
5055 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5056#ifdef IN_RING0
5057 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5058 AssertRCReturn(rc, rc);
5059#endif
5060
5061#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5062 /*
5063 * Flush prefetch buffer
5064 */
5065 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5066#endif
5067
5068 /*
5069 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5070 */
5071 if ( pCtx->eflags.Bits.u1VM
5072 && pCtx->eflags.Bits.u2IOPL != 3
5073 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5074 && (pCtx->cr0 & X86_CR0_PE) )
5075 {
5076 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5077 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5078 u8Vector = X86_XCPT_GP;
5079 uErr = 0;
5080 }
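     /* The check above implements the IOPL sensitivity of software INT n in
        virtual-8086 mode: with IOPL < 3 the instruction is converted into a
        #GP(0) instead of being dispatched through the IDT. */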
5081#ifdef DBGFTRACE_ENABLED
5082 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5083 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5084 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5085#endif
5086
5087 /*
5088 * Do recursion accounting.
5089 */
5090 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5091 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5092 if (pVCpu->iem.s.cXcptRecursions == 0)
5093 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5094 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5095 else
5096 {
5097 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5098 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5099
5100 /** @todo double and triple faults. */
5101 if (pVCpu->iem.s.cXcptRecursions >= 3)
5102 {
5103#ifdef DEBUG_bird
5104 AssertFailed();
5105#endif
5106 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5107 }
5108
5109 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5110 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5111 {
5112 ....
5113 } */
5114 }
5115 pVCpu->iem.s.cXcptRecursions++;
5116 pVCpu->iem.s.uCurXcpt = u8Vector;
5117 pVCpu->iem.s.fCurXcpt = fFlags;
5118
5119 /*
5120 * Extensive logging.
5121 */
5122#if defined(LOG_ENABLED) && defined(IN_RING3)
5123 if (LogIs3Enabled())
5124 {
5125 PVM pVM = pVCpu->CTX_SUFF(pVM);
5126 char szRegs[4096];
5127 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5128 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5129 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5130 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5131 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5132 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5133 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5134 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5135 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5136 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5137 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5138 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5139 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5140 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5141 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5142 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5143 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5144 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5145 " efer=%016VR{efer}\n"
5146 " pat=%016VR{pat}\n"
5147 " sf_mask=%016VR{sf_mask}\n"
5148 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5149 " lstar=%016VR{lstar}\n"
5150 " star=%016VR{star} cstar=%016VR{cstar}\n"
5151 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5152 );
5153
5154 char szInstr[256];
5155 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5156 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5157 szInstr, sizeof(szInstr), NULL);
5158 Log3(("%s%s\n", szRegs, szInstr));
5159 }
5160#endif /* LOG_ENABLED */
5161
5162 /*
5163 * Call the mode specific worker function.
5164 */
5165 VBOXSTRICTRC rcStrict;
5166 if (!(pCtx->cr0 & X86_CR0_PE))
5167 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5168 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5169 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5170 else
5171 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5172
5173 /* Flush the prefetch buffer. */
5174#ifdef IEM_WITH_CODE_TLB
5175 pVCpu->iem.s.pbInstrBuf = NULL;
5176#else
5177 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5178#endif
5179
5180 /*
5181 * Unwind.
5182 */
5183 pVCpu->iem.s.cXcptRecursions--;
5184 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5185 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5186 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5187 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5188 return rcStrict;
5189}
5190
5191#ifdef IEM_WITH_SETJMP
5192/**
5193 * See iemRaiseXcptOrInt. Will not return.
5194 */
5195IEM_STATIC DECL_NO_RETURN(void)
5196iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5197 uint8_t cbInstr,
5198 uint8_t u8Vector,
5199 uint32_t fFlags,
5200 uint16_t uErr,
5201 uint64_t uCr2)
5202{
5203 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5204 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5205}
5206#endif
5207
5208
5209/** \#DE - 00. */
5210DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5211{
5212 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5213}
5214
5215
5216/** \#DB - 01.
5217 * @note This automatically clears DR7.GD. */
5218DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5219{
5220 /** @todo set/clear RF. */
5221 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5222 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5223}
5224
5225
5226/** \#UD - 06. */
5227DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5228{
5229 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5230}
5231
5232
5233/** \#NM - 07. */
5234DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5235{
5236 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5237}
5238
5239
5240/** \#TS(err) - 0a. */
5241DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5242{
5243 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5244}
5245
5246
5247/** \#TS(tr) - 0a. */
5248DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5249{
5250 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5251 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5252}
5253
5254
5255/** \#TS(0) - 0a. */
5256DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5257{
5258 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5259 0, 0);
5260}
5261
5262
5263/** \#TS(sel) - 0a. */
5264DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5265{
5266 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5267 uSel & X86_SEL_MASK_OFF_RPL, 0);
5268}
5269
5270
5271/** \#NP(err) - 0b. */
5272DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5273{
5274 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5275}
5276
5277
5278/** \#NP(sel) - 0b. */
5279DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5280{
5281 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5282 uSel & ~X86_SEL_RPL, 0);
5283}
5284
5285
5286/** \#SS(seg) - 0c. */
5287DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5288{
5289 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5290 uSel & ~X86_SEL_RPL, 0);
5291}
5292
5293
5294/** \#SS(err) - 0c. */
5295DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5296{
5297 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5298}
5299
5300
5301/** \#GP(n) - 0d. */
5302DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5303{
5304 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5305}
5306
5307
5308/** \#GP(0) - 0d. */
5309DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5310{
5311 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5312}
5313
5314#ifdef IEM_WITH_SETJMP
5315/** \#GP(0) - 0d. */
5316DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5317{
5318 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5319}
5320#endif
5321
5322
5323/** \#GP(sel) - 0d. */
5324DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5325{
5326 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5327 Sel & ~X86_SEL_RPL, 0);
5328}
5329
5330
5331/** \#GP(0) - 0d. */
5332DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5333{
5334 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5335}
5336
5337
5338/** \#GP(sel) - 0d. */
5339DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5340{
5341 NOREF(iSegReg); NOREF(fAccess);
5342 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5343 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5344}
5345
5346#ifdef IEM_WITH_SETJMP
5347/** \#GP(sel) - 0d, longjmp. */
5348DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5349{
5350 NOREF(iSegReg); NOREF(fAccess);
5351 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5352 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5353}
5354#endif
5355
5356/** \#GP(sel) - 0d. */
5357DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5358{
5359 NOREF(Sel);
5360 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5361}
5362
5363#ifdef IEM_WITH_SETJMP
5364/** \#GP(sel) - 0d, longjmp. */
5365DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5366{
5367 NOREF(Sel);
5368 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5369}
5370#endif
5371
5372
5373/** \#GP(sel) - 0d. */
5374DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5375{
5376 NOREF(iSegReg); NOREF(fAccess);
5377 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5378}
5379
5380#ifdef IEM_WITH_SETJMP
5381/** \#GP(sel) - 0d, longjmp. */
5382DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5383 uint32_t fAccess)
5384{
5385 NOREF(iSegReg); NOREF(fAccess);
5386 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5387}
5388#endif
5389
5390
5391/** \#PF(n) - 0e. */
5392DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5393{
5394 uint16_t uErr;
5395 switch (rc)
5396 {
5397 case VERR_PAGE_NOT_PRESENT:
5398 case VERR_PAGE_TABLE_NOT_PRESENT:
5399 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5400 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5401 uErr = 0;
5402 break;
5403
5404 default:
5405 AssertMsgFailed(("%Rrc\n", rc));
5406 case VERR_ACCESS_DENIED:
5407 uErr = X86_TRAP_PF_P;
5408 break;
5409
5410 /** @todo reserved */
5411 }
5412
5413 if (pVCpu->iem.s.uCpl == 3)
5414 uErr |= X86_TRAP_PF_US;
5415
5416 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5417 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5418 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5419 uErr |= X86_TRAP_PF_ID;
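     /* The instruction-fetch (I/D) bit is only reported in the error code
        when no-execute paging is in effect (PAE paging with EFER.NXE set),
        which is what the check above mirrors. */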
5420
5421#if 0 /* This is so much non-sense, really. Why was it done like that? */
5422 /* Note! RW access callers reporting a WRITE protection fault, will clear
5423 the READ flag before calling. So, read-modify-write accesses (RW)
5424 can safely be reported as READ faults. */
5425 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5426 uErr |= X86_TRAP_PF_RW;
5427#else
5428 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5429 {
5430 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5431 uErr |= X86_TRAP_PF_RW;
5432 }
5433#endif
5434
5435 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5436 uErr, GCPtrWhere);
5437}
5438
5439#ifdef IEM_WITH_SETJMP
5440/** \#PF(n) - 0e, longjmp. */
5441IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5442{
5443 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5444}
5445#endif
5446
5447
5448/** \#MF(0) - 10. */
5449DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5450{
5451 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5452}
5453
5454
5455/** \#AC(0) - 11. */
5456DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5457{
5458 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5459}
5460
5461
5462/**
5463 * Macro for calling iemCImplRaiseDivideError().
5464 *
5465 * This enables us to add/remove arguments and force different levels of
5466 * inlining as we wish.
5467 *
5468 * @return Strict VBox status code.
5469 */
5470#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5471IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5472{
5473 NOREF(cbInstr);
5474 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5475}
5476
5477
5478/**
5479 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5480 *
5481 * This enables us to add/remove arguments and force different levels of
5482 * inlining as we wish.
5483 *
5484 * @return Strict VBox status code.
5485 */
5486#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5487IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5488{
5489 NOREF(cbInstr);
5490 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5491}
5492
5493
5494/**
5495 * Macro for calling iemCImplRaiseInvalidOpcode().
5496 *
5497 * This enables us to add/remove arguments and force different levels of
5498 * inlining as we wish.
5499 *
5500 * @return Strict VBox status code.
5501 */
5502#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5503IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5504{
5505 NOREF(cbInstr);
5506 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5507}
5508
5509
5510/** @} */
5511
5512
5513/*
5514 *
5515 * Helper routines.
5516 * Helper routines.
5517 * Helper routines.
5518 *
5519 */
5520
5521/**
5522 * Recalculates the effective operand size.
5523 *
5524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5525 */
5526IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5527{
5528 switch (pVCpu->iem.s.enmCpuMode)
5529 {
5530 case IEMMODE_16BIT:
5531 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5532 break;
5533 case IEMMODE_32BIT:
5534 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5535 break;
5536 case IEMMODE_64BIT:
5537 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5538 {
5539 case 0:
5540 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5541 break;
5542 case IEM_OP_PRF_SIZE_OP:
5543 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5544 break;
5545 case IEM_OP_PRF_SIZE_REX_W:
5546 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5547 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5548 break;
5549 }
5550 break;
5551 default:
5552 AssertFailed();
5553 }
5554}
5555
5556
5557/**
5558 * Sets the default operand size to 64-bit and recalculates the effective
5559 * operand size.
5560 *
5561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5562 */
5563IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5564{
5565 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5566 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
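     /* Used by instructions whose operand size defaults to 64-bit in long
        mode; a 66h prefix selects 16-bit instead, unless REX.W is also
        present, in which case 64-bit wins - see the check below. */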
5567 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5568 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5569 else
5570 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5571}
5572
5573
5574/*
5575 *
5576 * Common opcode decoders.
5577 * Common opcode decoders.
5578 * Common opcode decoders.
5579 *
5580 */
5581//#include <iprt/mem.h>
5582
5583/**
5584 * Used to add extra details about a stub case.
5585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5586 */
5587IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5588{
5589#if defined(LOG_ENABLED) && defined(IN_RING3)
5590 PVM pVM = pVCpu->CTX_SUFF(pVM);
5591 char szRegs[4096];
5592 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5593 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5594 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5595 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5596 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5597 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5598 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5599 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5600 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5601 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5602 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5603 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5604 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5605 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5606 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5607 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5608 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5609 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5610 " efer=%016VR{efer}\n"
5611 " pat=%016VR{pat}\n"
5612 " sf_mask=%016VR{sf_mask}\n"
5613 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5614 " lstar=%016VR{lstar}\n"
5615 " star=%016VR{star} cstar=%016VR{cstar}\n"
5616 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5617 );
5618
5619 char szInstr[256];
5620 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5621 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5622 szInstr, sizeof(szInstr), NULL);
5623
5624 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5625#else
5626 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5627#endif
5628}
5629
5630/**
5631 * Complains about a stub.
5632 *
5633 * Providing two versions of this macro, one for daily use and one for use when
5634 * working on IEM.
5635 */
5636#if 0
5637# define IEMOP_BITCH_ABOUT_STUB() \
5638 do { \
5639 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5640 iemOpStubMsg2(pVCpu); \
5641 RTAssertPanic(); \
5642 } while (0)
5643#else
5644# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5645#endif
5646
5647/** Stubs an opcode. */
5648#define FNIEMOP_STUB(a_Name) \
5649 FNIEMOP_DEF(a_Name) \
5650 { \
5651 RT_NOREF_PV(pVCpu); \
5652 IEMOP_BITCH_ABOUT_STUB(); \
5653 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5654 } \
5655 typedef int ignore_semicolon
5656
5657/** Stubs an opcode. */
5658#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5659 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5660 { \
5661 RT_NOREF_PV(pVCpu); \
5662 RT_NOREF_PV(a_Name0); \
5663 IEMOP_BITCH_ABOUT_STUB(); \
5664 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5665 } \
5666 typedef int ignore_semicolon
5667
5668/** Stubs an opcode which currently should raise \#UD. */
5669#define FNIEMOP_UD_STUB(a_Name) \
5670 FNIEMOP_DEF(a_Name) \
5671 { \
5672 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5673 return IEMOP_RAISE_INVALID_OPCODE(); \
5674 } \
5675 typedef int ignore_semicolon
5676
5677/** Stubs an opcode which currently should raise \#UD. */
5678#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5679 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5680 { \
5681 RT_NOREF_PV(pVCpu); \
5682 RT_NOREF_PV(a_Name0); \
5683 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5684 return IEMOP_RAISE_INVALID_OPCODE(); \
5685 } \
5686 typedef int ignore_semicolon
5687
5688
5689
5690/** @name Register Access.
5691 * @{
5692 */
5693
5694/**
5695 * Gets a reference (pointer) to the specified hidden segment register.
5696 *
5697 * @returns Hidden register reference.
5698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5699 * @param iSegReg The segment register.
5700 */
5701IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5702{
5703 Assert(iSegReg < X86_SREG_COUNT);
5704 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5705 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5706
5707#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5708 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5709 { /* likely */ }
5710 else
5711 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5712#else
5713 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5714#endif
5715 return pSReg;
5716}
5717
5718
5719/**
5720 * Ensures that the given hidden segment register is up to date.
5721 *
5722 * @returns Hidden register reference.
5723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5724 * @param pSReg The segment register.
5725 */
5726IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5727{
5728#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5729 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5730 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5731#else
5732 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5733 NOREF(pVCpu);
5734#endif
5735 return pSReg;
5736}
5737
5738
5739/**
5740 * Gets a reference (pointer) to the specified segment register (the selector
5741 * value).
5742 *
5743 * @returns Pointer to the selector variable.
5744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5745 * @param iSegReg The segment register.
5746 */
5747DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5748{
5749 Assert(iSegReg < X86_SREG_COUNT);
5750 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5751 return &pCtx->aSRegs[iSegReg].Sel;
5752}
5753
5754
5755/**
5756 * Fetches the selector value of a segment register.
5757 *
5758 * @returns The selector value.
5759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5760 * @param iSegReg The segment register.
5761 */
5762DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5763{
5764 Assert(iSegReg < X86_SREG_COUNT);
5765 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5766}
5767
5768
5769/**
5770 * Gets a reference (pointer) to the specified general purpose register.
5771 *
5772 * @returns Register reference.
5773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5774 * @param iReg The general purpose register.
5775 */
5776DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5777{
5778 Assert(iReg < 16);
5779 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5780 return &pCtx->aGRegs[iReg];
5781}
5782
5783
5784/**
5785 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5786 *
5787 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5788 *
5789 * @returns Register reference.
5790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5791 * @param iReg The register.
5792 */
5793DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5794{
5795 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5796 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5797 {
5798 Assert(iReg < 16);
5799 return &pCtx->aGRegs[iReg].u8;
5800 }
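     /* Without a REX prefix, register encodings 4 thru 7 select the legacy
        high-byte registers AH, CH, DH and BH (bits 8-15 of the first four
        GPRs); with any REX prefix they select SPL, BPL, SIL and DIL, which
        is why the REX check above takes the low-byte path. */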
5801 /* high 8-bit register. */
5802 Assert(iReg < 8);
5803 return &pCtx->aGRegs[iReg & 3].bHi;
5804}
5805
5806
5807/**
5808 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5809 *
5810 * @returns Register reference.
5811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5812 * @param iReg The register.
5813 */
5814DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5815{
5816 Assert(iReg < 16);
5817 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5818 return &pCtx->aGRegs[iReg].u16;
5819}
5820
5821
5822/**
5823 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5824 *
5825 * @returns Register reference.
5826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5827 * @param iReg The register.
5828 */
5829DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5830{
5831 Assert(iReg < 16);
5832 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5833 return &pCtx->aGRegs[iReg].u32;
5834}
5835
5836
5837/**
5838 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5839 *
5840 * @returns Register reference.
5841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5842 * @param iReg The register.
5843 */
5844DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5845{
5846 Assert(iReg < 16);
5847 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5848 return &pCtx->aGRegs[iReg].u64;
5849}
5850
5851
5852/**
5853 * Fetches the value of an 8-bit general purpose register.
5854 *
5855 * @returns The register value.
5856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5857 * @param iReg The register.
5858 */
5859DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5860{
5861 return *iemGRegRefU8(pVCpu, iReg);
5862}
5863
5864
5865/**
5866 * Fetches the value of a 16-bit general purpose register.
5867 *
5868 * @returns The register value.
5869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5870 * @param iReg The register.
5871 */
5872DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5873{
5874 Assert(iReg < 16);
5875 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5876}
5877
5878
5879/**
5880 * Fetches the value of a 32-bit general purpose register.
5881 *
5882 * @returns The register value.
5883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5884 * @param iReg The register.
5885 */
5886DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5887{
5888 Assert(iReg < 16);
5889 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5890}
5891
5892
5893/**
5894 * Fetches the value of a 64-bit general purpose register.
5895 *
5896 * @returns The register value.
5897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5898 * @param iReg The register.
5899 */
5900DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5901{
5902 Assert(iReg < 16);
5903 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5904}
5905
5906
5907/**
5908 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5909 *
5910 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5911 * segment limit.
5912 *
5913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5914 * @param offNextInstr The offset of the next instruction.
5915 */
5916IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5917{
5918 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5919 switch (pVCpu->iem.s.enmEffOpSize)
5920 {
5921 case IEMMODE_16BIT:
5922 {
5923 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5924 if ( uNewIp > pCtx->cs.u32Limit
5925 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5926 return iemRaiseGeneralProtectionFault0(pVCpu);
5927 pCtx->rip = uNewIp;
5928 break;
5929 }
5930
5931 case IEMMODE_32BIT:
5932 {
5933 Assert(pCtx->rip <= UINT32_MAX);
5934 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5935
5936 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5937 if (uNewEip > pCtx->cs.u32Limit)
5938 return iemRaiseGeneralProtectionFault0(pVCpu);
5939 pCtx->rip = uNewEip;
5940 break;
5941 }
5942
5943 case IEMMODE_64BIT:
5944 {
5945 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5946
5947 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5948 if (!IEM_IS_CANONICAL(uNewRip))
5949 return iemRaiseGeneralProtectionFault0(pVCpu);
5950 pCtx->rip = uNewRip;
5951 break;
5952 }
5953
5954 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5955 }
5956
5957 pCtx->eflags.Bits.u1RF = 0;
5958
5959#ifndef IEM_WITH_CODE_TLB
5960 /* Flush the prefetch buffer. */
5961 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5962#endif
5963
5964 return VINF_SUCCESS;
5965}
5966
5967
5968/**
5969 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5970 *
5971 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5972 * segment limit.
5973 *
5974 * @returns Strict VBox status code.
5975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5976 * @param offNextInstr The offset of the next instruction.
5977 */
5978IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5979{
5980 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5981 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5982
5983 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5984 if ( uNewIp > pCtx->cs.u32Limit
5985 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5986 return iemRaiseGeneralProtectionFault0(pVCpu);
5987 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5988 pCtx->rip = uNewIp;
5989 pCtx->eflags.Bits.u1RF = 0;
5990
5991#ifndef IEM_WITH_CODE_TLB
5992 /* Flush the prefetch buffer. */
5993 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5994#endif
5995
5996 return VINF_SUCCESS;
5997}
5998
5999
6000/**
6001 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6002 *
6003 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6004 * segment limit.
6005 *
6006 * @returns Strict VBox status code.
6007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6008 * @param offNextInstr The offset of the next instruction.
6009 */
6010IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6011{
6012 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6013 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6014
6015 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6016 {
6017 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6018
6019 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6020 if (uNewEip > pCtx->cs.u32Limit)
6021 return iemRaiseGeneralProtectionFault0(pVCpu);
6022 pCtx->rip = uNewEip;
6023 }
6024 else
6025 {
6026 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6027
6028 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6029 if (!IEM_IS_CANONICAL(uNewRip))
6030 return iemRaiseGeneralProtectionFault0(pVCpu);
6031 pCtx->rip = uNewRip;
6032 }
6033 pCtx->eflags.Bits.u1RF = 0;
6034
6035#ifndef IEM_WITH_CODE_TLB
6036 /* Flush the prefetch buffer. */
6037 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6038#endif
6039
6040 return VINF_SUCCESS;
6041}
6042
6043
6044/**
6045 * Performs a near jump to the specified address.
6046 *
6047 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6048 * segment limit.
6049 *
6050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6051 * @param uNewRip The new RIP value.
6052 */
6053IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6054{
6055 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6056 switch (pVCpu->iem.s.enmEffOpSize)
6057 {
6058 case IEMMODE_16BIT:
6059 {
6060 Assert(uNewRip <= UINT16_MAX);
6061 if ( uNewRip > pCtx->cs.u32Limit
6062 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6063 return iemRaiseGeneralProtectionFault0(pVCpu);
6064 /** @todo Test 16-bit jump in 64-bit mode. */
6065 pCtx->rip = uNewRip;
6066 break;
6067 }
6068
6069 case IEMMODE_32BIT:
6070 {
6071 Assert(uNewRip <= UINT32_MAX);
6072 Assert(pCtx->rip <= UINT32_MAX);
6073 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6074
6075 if (uNewRip > pCtx->cs.u32Limit)
6076 return iemRaiseGeneralProtectionFault0(pVCpu);
6077 pCtx->rip = uNewRip;
6078 break;
6079 }
6080
6081 case IEMMODE_64BIT:
6082 {
6083 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6084
6085 if (!IEM_IS_CANONICAL(uNewRip))
6086 return iemRaiseGeneralProtectionFault0(pVCpu);
6087 pCtx->rip = uNewRip;
6088 break;
6089 }
6090
6091 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6092 }
6093
6094 pCtx->eflags.Bits.u1RF = 0;
6095
6096#ifndef IEM_WITH_CODE_TLB
6097 /* Flush the prefetch buffer. */
6098 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6099#endif
6100
6101 return VINF_SUCCESS;
6102}
6103
6104
6105/**
6106 * Get the address of the top of the stack.
6107 *
6108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6109 * @param pCtx The CPU context which SP/ESP/RSP should be
6110 * read.
6111 */
6112DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6113{
6114 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6115 return pCtx->rsp;
6116 if (pCtx->ss.Attr.n.u1DefBig)
6117 return pCtx->esp;
6118 return pCtx->sp;
6119}
6120
6121
6122/**
6123 * Updates the RIP/EIP/IP to point to the next instruction.
6124 *
6125 * This function leaves the EFLAGS.RF flag alone.
6126 *
6127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6128 * @param cbInstr The number of bytes to add.
6129 */
6130IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6131{
6132 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6133 switch (pVCpu->iem.s.enmCpuMode)
6134 {
6135 case IEMMODE_16BIT:
6136 Assert(pCtx->rip <= UINT16_MAX);
6137 pCtx->eip += cbInstr;
6138 pCtx->eip &= UINT32_C(0xffff);
6139 break;
6140
6141 case IEMMODE_32BIT:
6142 pCtx->eip += cbInstr;
6143 Assert(pCtx->rip <= UINT32_MAX);
6144 break;
6145
6146 case IEMMODE_64BIT:
6147 pCtx->rip += cbInstr;
6148 break;
6149 default: AssertFailed();
6150 }
6151}
6152
6153
6154#if 0
6155/**
6156 * Updates the RIP/EIP/IP to point to the next instruction.
6157 *
6158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6159 */
6160IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6161{
6162 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6163}
6164#endif
6165
6166
6167
6168/**
6169 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6170 *
6171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6172 * @param cbInstr The number of bytes to add.
6173 */
6174IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6175{
6176 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6177
6178 pCtx->eflags.Bits.u1RF = 0;
6179
6180 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6181#if ARCH_BITS >= 64
6182 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6183 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6184 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
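     /* The mask table makes IP wrap at 64K and EIP at 4G while RIP keeps all
        64 bits, so a single masked addition covers all three CPU modes. */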
6185#else
6186 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6187 pCtx->rip += cbInstr;
6188 else
6189 {
6190 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6191 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6192 }
6193#endif
6194}
6195
6196
6197/**
6198 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6199 *
6200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6201 */
6202IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6203{
6204 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6205}
6206
6207
6208/**
6209 * Adds to the stack pointer.
6210 *
6211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6212 * @param pCtx The CPU context which SP/ESP/RSP should be
6213 * updated.
6214 * @param cbToAdd The number of bytes to add (8-bit!).
6215 */
6216DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6217{
6218 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6219 pCtx->rsp += cbToAdd;
6220 else if (pCtx->ss.Attr.n.u1DefBig)
6221 pCtx->esp += cbToAdd;
6222 else
6223 pCtx->sp += cbToAdd;
6224}
6225
6226
6227/**
6228 * Subtracts from the stack pointer.
6229 *
6230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6231 * @param pCtx The CPU context which SP/ESP/RSP should be
6232 * updated.
6233 * @param cbToSub The number of bytes to subtract (8-bit!).
6234 */
6235DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6236{
6237 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6238 pCtx->rsp -= cbToSub;
6239 else if (pCtx->ss.Attr.n.u1DefBig)
6240 pCtx->esp -= cbToSub;
6241 else
6242 pCtx->sp -= cbToSub;
6243}
6244
6245
6246/**
6247 * Adds to the temporary stack pointer.
6248 *
6249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6250 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6251 * @param cbToAdd The number of bytes to add (16-bit).
6252 * @param pCtx Where to get the current stack mode.
6253 */
6254DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6255{
6256 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6257 pTmpRsp->u += cbToAdd;
6258 else if (pCtx->ss.Attr.n.u1DefBig)
6259 pTmpRsp->DWords.dw0 += cbToAdd;
6260 else
6261 pTmpRsp->Words.w0 += cbToAdd;
6262}
6263
6264
6265/**
6266 * Subtracts from the temporary stack pointer.
6267 *
6268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6269 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6270 * @param cbToSub The number of bytes to subtract.
6271 * @param pCtx Where to get the current stack mode.
6272 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6273 * expecting that.
6274 */
6275DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6276{
6277 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6278 pTmpRsp->u -= cbToSub;
6279 else if (pCtx->ss.Attr.n.u1DefBig)
6280 pTmpRsp->DWords.dw0 -= cbToSub;
6281 else
6282 pTmpRsp->Words.w0 -= cbToSub;
6283}
6284
6285
6286/**
6287 * Calculates the effective stack address for a push of the specified size as
6288 * well as the new RSP value (upper bits may be masked).
6289 *
6290 * @returns Effective stack address for the push.
6291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6292 * @param pCtx Where to get the current stack mode.
6293 * @param cbItem The size of the stack item to push.
6294 * @param puNewRsp Where to return the new RSP value.
6295 */
6296DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6297{
6298 RTUINT64U uTmpRsp;
6299 RTGCPTR GCPtrTop;
6300 uTmpRsp.u = pCtx->rsp;
6301
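    /* A push pre-decrements: the returned address is the new top of stack, while *puNewRsp gets the full updated RSP value for the caller to commit separately. */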
6302 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6303 GCPtrTop = uTmpRsp.u -= cbItem;
6304 else if (pCtx->ss.Attr.n.u1DefBig)
6305 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6306 else
6307 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6308 *puNewRsp = uTmpRsp.u;
6309 return GCPtrTop;
6310}
6311
6312
6313/**
6314 * Gets the current stack pointer and calculates the value after a pop of the
6315 * specified size.
6316 *
6317 * @returns Current stack pointer.
6318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6319 * @param pCtx Where to get the current stack mode.
6320 * @param cbItem The size of the stack item to pop.
6321 * @param puNewRsp Where to return the new RSP value.
6322 */
6323DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6324{
6325 RTUINT64U uTmpRsp;
6326 RTGCPTR GCPtrTop;
6327 uTmpRsp.u = pCtx->rsp;
6328
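    /* A pop post-increments: the current top of stack is returned and the incremented value is passed back via *puNewRsp for the caller to commit. */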
6329 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6330 {
6331 GCPtrTop = uTmpRsp.u;
6332 uTmpRsp.u += cbItem;
6333 }
6334 else if (pCtx->ss.Attr.n.u1DefBig)
6335 {
6336 GCPtrTop = uTmpRsp.DWords.dw0;
6337 uTmpRsp.DWords.dw0 += cbItem;
6338 }
6339 else
6340 {
6341 GCPtrTop = uTmpRsp.Words.w0;
6342 uTmpRsp.Words.w0 += cbItem;
6343 }
6344 *puNewRsp = uTmpRsp.u;
6345 return GCPtrTop;
6346}
6347
6348
6349/**
6350 * Calculates the effective stack address for a push of the specified size as
6351 * well as the new temporary RSP value (upper bits may be masked).
6352 *
6353 * @returns Effective stack address for the push.
6354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6355 * @param pCtx Where to get the current stack mode.
6356 * @param pTmpRsp The temporary stack pointer. This is updated.
6357 * @param cbItem The size of the stack item to push.
6358 */
6359DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6360{
6361 RTGCPTR GCPtrTop;
6362
6363 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6364 GCPtrTop = pTmpRsp->u -= cbItem;
6365 else if (pCtx->ss.Attr.n.u1DefBig)
6366 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6367 else
6368 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6369 return GCPtrTop;
6370}
6371
6372
6373/**
6374 * Gets the effective stack address for a pop of the specified size and
6375 * calculates and updates the temporary RSP.
6376 *
6377 * @returns Current stack pointer.
6378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6379 * @param pCtx Where to get the current stack mode.
6380 * @param pTmpRsp The temporary stack pointer. This is updated.
6381 * @param cbItem The size of the stack item to pop.
6382 */
6383DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6384{
6385 RTGCPTR GCPtrTop;
6386 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6387 {
6388 GCPtrTop = pTmpRsp->u;
6389 pTmpRsp->u += cbItem;
6390 }
6391 else if (pCtx->ss.Attr.n.u1DefBig)
6392 {
6393 GCPtrTop = pTmpRsp->DWords.dw0;
6394 pTmpRsp->DWords.dw0 += cbItem;
6395 }
6396 else
6397 {
6398 GCPtrTop = pTmpRsp->Words.w0;
6399 pTmpRsp->Words.w0 += cbItem;
6400 }
6401 return GCPtrTop;
6402}
6403
6404/** @} */
6405
6406
6407/** @name FPU access and helpers.
6408 *
6409 * @{
6410 */
6411
6412
6413/**
6414 * Hook for preparing to use the host FPU.
6415 *
6416 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6417 *
6418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6419 */
6420DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6421{
6422#ifdef IN_RING3
6423 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6424#else
6425 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6426#endif
6427}
6428
6429
6430/**
6431 * Hook for preparing to use the host FPU for SSE.
6432 *
6433 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6434 *
6435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6436 */
6437DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6438{
6439 iemFpuPrepareUsage(pVCpu);
6440}
6441
6442
6443/**
6444 * Hook for actualizing the guest FPU state before the interpreter reads it.
6445 *
6446 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6447 *
6448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6449 */
6450DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6451{
6452#ifdef IN_RING3
6453 NOREF(pVCpu);
6454#else
6455 CPUMRZFpuStateActualizeForRead(pVCpu);
6456#endif
6457}
6458
6459
6460/**
6461 * Hook for actualizing the guest FPU state before the interpreter changes it.
6462 *
6463 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6464 *
6465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6466 */
6467DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6468{
6469#ifdef IN_RING3
6470 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6471#else
6472 CPUMRZFpuStateActualizeForChange(pVCpu);
6473#endif
6474}
6475
6476
6477/**
6478 * Hook for actualizing the guest XMM0..15 register state for read only.
6479 *
6480 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6481 *
6482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6483 */
6484DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6485{
6486#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6487 NOREF(pVCpu);
6488#else
6489 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6490#endif
6491}
6492
6493
6494/**
6495 * Hook for actualizing the guest XMM0..15 register state for read+write.
6496 *
6497 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6498 *
6499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6500 */
6501DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6502{
6503#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6504 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6505#else
6506 CPUMRZFpuStateActualizeForChange(pVCpu);
6507#endif
6508}
6509
6510
6511/**
6512 * Stores a QNaN value into a FPU register.
6513 *
6514 * @param pReg Pointer to the register.
6515 */
6516DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6517{
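    /* Builds the "real indefinite" QNaN: sign=1, exponent=0x7fff, mantissa=0xC000000000000000 (integer bit plus topmost fraction bit). */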
6518 pReg->au32[0] = UINT32_C(0x00000000);
6519 pReg->au32[1] = UINT32_C(0xc0000000);
6520 pReg->au16[4] = UINT16_C(0xffff);
6521}
6522
6523
6524/**
6525 * Updates the FOP, FPU.CS and FPUIP registers.
6526 *
6527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6528 * @param pCtx The CPU context.
6529 * @param pFpuCtx The FPU context.
6530 */
6531DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6532{
6533 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6534 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6535 /** @todo x87.CS and FPUIP need to be kept separately. */
6536 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6537 {
6538 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6539 * happens in real mode here based on the fnsave and fnstenv images. */
6540 pFpuCtx->CS = 0;
6541 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6542 }
6543 else
6544 {
6545 pFpuCtx->CS = pCtx->cs.Sel;
6546 pFpuCtx->FPUIP = pCtx->rip;
6547 }
6548}
6549
6550
6551/**
6552 * Updates the x87.DS and FPUDP registers.
6553 *
6554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6555 * @param pCtx The CPU context.
6556 * @param pFpuCtx The FPU context.
6557 * @param iEffSeg The effective segment register.
6558 * @param GCPtrEff The effective address relative to @a iEffSeg.
6559 */
6560DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6561{
6562 RTSEL sel;
6563 switch (iEffSeg)
6564 {
6565 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6566 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6567 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6568 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6569 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6570 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6571 default:
6572 AssertMsgFailed(("%d\n", iEffSeg));
6573 sel = pCtx->ds.Sel;
6574 }
6575 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6576 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6577 {
6578 pFpuCtx->DS = 0;
6579 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6580 }
6581 else
6582 {
6583 pFpuCtx->DS = sel;
6584 pFpuCtx->FPUDP = GCPtrEff;
6585 }
6586}
6587
6588
6589/**
6590 * Rotates the stack registers in the push direction.
6591 *
6592 * @param pFpuCtx The FPU context.
6593 * @remarks This is a complete waste of time, but fxsave stores the registers in
6594 * stack order.
6595 */
6596DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6597{
6598 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6599 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6600 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6601 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6602 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6603 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6604 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6605 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6606 pFpuCtx->aRegs[0].r80 = r80Tmp;
6607}
6608
6609
6610/**
6611 * Rotates the stack registers in the pop direction.
6612 *
6613 * @param pFpuCtx The FPU context.
6614 * @remarks This is a complete waste of time, but fxsave stores the registers in
6615 * stack order.
6616 */
6617DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6618{
6619 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6620 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6621 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6622 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6623 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6624 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6625 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6626 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6627 pFpuCtx->aRegs[7].r80 = r80Tmp;
6628}
6629
6630
6631/**
6632 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6633 * exception prevents it.
6634 *
6635 * @param pResult The FPU operation result to push.
6636 * @param pFpuCtx The FPU context.
6637 */
6638IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6639{
6640 /* Update FSW and bail if there are pending exceptions afterwards. */
6641 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6642 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6643 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6644 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6645 {
6646 pFpuCtx->FSW = fFsw;
6647 return;
6648 }
6649
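    /* New TOP = old TOP - 1 (push direction); adding 7 modulo 8 is the same as subtracting 1. */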
6650 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6651 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6652 {
6653 /* All is fine, push the actual value. */
6654 pFpuCtx->FTW |= RT_BIT(iNewTop);
6655 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6656 }
6657 else if (pFpuCtx->FCW & X86_FCW_IM)
6658 {
6659 /* Masked stack overflow, push QNaN. */
6660 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6661 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6662 }
6663 else
6664 {
6665 /* Raise stack overflow, don't push anything. */
6666 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6667 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6668 return;
6669 }
6670
6671 fFsw &= ~X86_FSW_TOP_MASK;
6672 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6673 pFpuCtx->FSW = fFsw;
6674
6675 iemFpuRotateStackPush(pFpuCtx);
6676}
6677
6678
6679/**
6680 * Stores a result in a FPU register and updates the FSW and FTW.
6681 *
6682 * @param pFpuCtx The FPU context.
6683 * @param pResult The result to store.
6684 * @param iStReg Which FPU register to store it in.
6685 */
6686IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6687{
6688 Assert(iStReg < 8);
6689 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6690 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6691 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6692 pFpuCtx->FTW |= RT_BIT(iReg);
6693 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6694}
6695
6696
6697/**
6698 * Only updates the FPU status word (FSW) with the result of the current
6699 * instruction.
6700 *
6701 * @param pFpuCtx The FPU context.
6702 * @param u16FSW The FSW output of the current instruction.
6703 */
6704IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6705{
6706 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6707 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6708}
6709
6710
6711/**
6712 * Pops one item off the FPU stack if no pending exception prevents it.
6713 *
6714 * @param pFpuCtx The FPU context.
6715 */
6716IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6717{
6718 /* Check pending exceptions. */
6719 uint16_t uFSW = pFpuCtx->FSW;
6720 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6721 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6722 return;
6723
6724 /* Advance TOP by one (pop); adding 9 equals adding 1 modulo 8. */
6725 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6726 uFSW &= ~X86_FSW_TOP_MASK;
6727 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6728 pFpuCtx->FSW = uFSW;
6729
6730 /* Mark the previous ST0 as empty. */
6731 iOldTop >>= X86_FSW_TOP_SHIFT;
6732 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6733
6734 /* Rotate the registers. */
6735 iemFpuRotateStackPop(pFpuCtx);
6736}
6737
6738
6739/**
6740 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6741 *
6742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6743 * @param pResult The FPU operation result to push.
6744 */
6745IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6746{
6747 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6748 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6749 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6750 iemFpuMaybePushResult(pResult, pFpuCtx);
6751}
6752
6753
6754/**
6755 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6756 * and sets FPUDP and FPUDS.
6757 *
6758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6759 * @param pResult The FPU operation result to push.
6760 * @param iEffSeg The effective segment register.
6761 * @param GCPtrEff The effective address relative to @a iEffSeg.
6762 */
6763IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6764{
6765 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6766 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6767 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6768 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6769 iemFpuMaybePushResult(pResult, pFpuCtx);
6770}
6771
6772
6773/**
6774 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6775 * unless a pending exception prevents it.
6776 *
6777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6778 * @param pResult The FPU operation result to store and push.
6779 */
6780IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6781{
6782 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6783 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6784 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6785
6786 /* Update FSW and bail if there are pending exceptions afterwards. */
6787 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6788 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6789 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6790 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6791 {
6792 pFpuCtx->FSW = fFsw;
6793 return;
6794 }
6795
6796 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6797 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6798 {
6799 /* All is fine, push the actual value. */
6800 pFpuCtx->FTW |= RT_BIT(iNewTop);
6801 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6802 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6803 }
6804 else if (pFpuCtx->FCW & X86_FCW_IM)
6805 {
6806 /* Masked stack overflow, push QNaN. */
6807 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6808 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6809 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6810 }
6811 else
6812 {
6813 /* Raise stack overflow, don't push anything. */
6814 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6815 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6816 return;
6817 }
6818
6819 fFsw &= ~X86_FSW_TOP_MASK;
6820 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6821 pFpuCtx->FSW = fFsw;
6822
6823 iemFpuRotateStackPush(pFpuCtx);
6824}
6825
6826
6827/**
6828 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6829 * FOP.
6830 *
6831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6832 * @param pResult The result to store.
6833 * @param iStReg Which FPU register to store it in.
6834 */
6835IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6836{
6837 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6838 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6839 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6840 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6841}
6842
6843
6844/**
6845 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6846 * FOP, and then pops the stack.
6847 *
6848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6849 * @param pResult The result to store.
6850 * @param iStReg Which FPU register to store it in.
6851 */
6852IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6853{
6854 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6855 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6856 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6857 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6858 iemFpuMaybePopOne(pFpuCtx);
6859}
6860
6861
6862/**
6863 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6864 * FPUDP, and FPUDS.
6865 *
6866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6867 * @param pResult The result to store.
6868 * @param iStReg Which FPU register to store it in.
6869 * @param iEffSeg The effective memory operand selector register.
6870 * @param GCPtrEff The effective memory operand offset.
6871 */
6872IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6873 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6874{
6875 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6876 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6877 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6878 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6879 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6880}
6881
6882
6883/**
6884 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6885 * FPUDP, and FPUDS, and then pops the stack.
6886 *
6887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6888 * @param pResult The result to store.
6889 * @param iStReg Which FPU register to store it in.
6890 * @param iEffSeg The effective memory operand selector register.
6891 * @param GCPtrEff The effective memory operand offset.
6892 */
6893IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6894 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6895{
6896 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6897 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6898 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6899 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6900 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6901 iemFpuMaybePopOne(pFpuCtx);
6902}
6903
6904
6905/**
6906 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6907 *
6908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6909 */
6910IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6911{
6912 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6913 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6914 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6915}
6916
6917
6918/**
6919 * Marks the specified stack register as free (for FFREE).
6920 *
6921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6922 * @param iStReg The register to free.
6923 */
6924IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6925{
6926 Assert(iStReg < 8);
6927 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6928 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6929 pFpuCtx->FTW &= ~RT_BIT(iReg);
6930}
6931
6932
6933/**
6934 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6935 *
6936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6937 */
6938IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6939{
6940 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6941 uint16_t uFsw = pFpuCtx->FSW;
6942 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6943 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6944 uFsw &= ~X86_FSW_TOP_MASK;
6945 uFsw |= uTop;
6946 pFpuCtx->FSW = uFsw;
6947}
6948
6949
6950/**
6951 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6952 *
6953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6954 */
6955IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6956{
6957 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6958 uint16_t uFsw = pFpuCtx->FSW;
6959 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
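    /* Adding 7 to the 3-bit TOP field equals subtracting 1 modulo 8, i.e. a push. */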
6960 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6961 uFsw &= ~X86_FSW_TOP_MASK;
6962 uFsw |= uTop;
6963 pFpuCtx->FSW = uFsw;
6964}
6965
6966
6967/**
6968 * Updates the FSW, FOP, FPUIP, and FPUCS.
6969 *
6970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6971 * @param u16FSW The FSW from the current instruction.
6972 */
6973IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6974{
6975 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6976 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6977 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6978 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6979}
6980
6981
6982/**
6983 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6984 *
6985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6986 * @param u16FSW The FSW from the current instruction.
6987 */
6988IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6989{
6990 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6991 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6992 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6993 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6994 iemFpuMaybePopOne(pFpuCtx);
6995}
6996
6997
6998/**
6999 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7000 *
7001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7002 * @param u16FSW The FSW from the current instruction.
7003 * @param iEffSeg The effective memory operand selector register.
7004 * @param GCPtrEff The effective memory operand offset.
7005 */
7006IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7007{
7008 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7009 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7010 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7011 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7012 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7013}
7014
7015
7016/**
7017 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7018 *
7019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7020 * @param u16FSW The FSW from the current instruction.
7021 */
7022IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7023{
7024 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7025 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7026 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7027 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7028 iemFpuMaybePopOne(pFpuCtx);
7029 iemFpuMaybePopOne(pFpuCtx);
7030}
7031
7032
7033/**
7034 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7035 *
7036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7037 * @param u16FSW The FSW from the current instruction.
7038 * @param iEffSeg The effective memory operand selector register.
7039 * @param GCPtrEff The effective memory operand offset.
7040 */
7041IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7042{
7043 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7044 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7045 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7046 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7047 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7048 iemFpuMaybePopOne(pFpuCtx);
7049}
7050
7051
7052/**
7053 * Worker routine for raising an FPU stack underflow exception.
7054 *
7055 * @param pFpuCtx The FPU context.
7056 * @param iStReg The stack register being accessed.
7057 */
7058IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7059{
7060 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7061 if (pFpuCtx->FCW & X86_FCW_IM)
7062 {
7063 /* Masked underflow. */
7064 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7065 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7066 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7067 if (iStReg != UINT8_MAX)
7068 {
7069 pFpuCtx->FTW |= RT_BIT(iReg);
7070 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7071 }
7072 }
7073 else
7074 {
7075 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7076 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7077 }
7078}
7079
7080
7081/**
7082 * Raises a FPU stack underflow exception.
7083 *
7084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7085 * @param iStReg The destination register that should be loaded
7086 * with QNaN if \#IS is not masked. Specify
7087 * UINT8_MAX if none (like for fcom).
7088 */
7089DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7090{
7091 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7092 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7093 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7094 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7095}
7096
7097
7098DECL_NO_INLINE(IEM_STATIC, void)
7099iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7100{
7101 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7102 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7103 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7104 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7105 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7106}
7107
7108
7109DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7110{
7111 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7112 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7113 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7114 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7115 iemFpuMaybePopOne(pFpuCtx);
7116}
7117
7118
7119DECL_NO_INLINE(IEM_STATIC, void)
7120iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7121{
7122 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7123 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7124 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7125 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7126 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7127 iemFpuMaybePopOne(pFpuCtx);
7128}
7129
7130
7131DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7132{
7133 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7134 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7135 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7136 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7137 iemFpuMaybePopOne(pFpuCtx);
7138 iemFpuMaybePopOne(pFpuCtx);
7139}
7140
7141
7142DECL_NO_INLINE(IEM_STATIC, void)
7143iemFpuStackPushUnderflow(PVMCPU pVCpu)
7144{
7145 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7146 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7147 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7148
7149 if (pFpuCtx->FCW & X86_FCW_IM)
7150 {
7151 /* Masked underflow - Push QNaN. */
7152 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7153 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7154 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7155 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7156 pFpuCtx->FTW |= RT_BIT(iNewTop);
7157 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7158 iemFpuRotateStackPush(pFpuCtx);
7159 }
7160 else
7161 {
7162 /* Exception pending - don't change TOP or the register stack. */
7163 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7164 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7165 }
7166}
7167
7168
7169DECL_NO_INLINE(IEM_STATIC, void)
7170iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7171{
7172 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7173 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7174 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7175
7176 if (pFpuCtx->FCW & X86_FCW_IM)
7177 {
7178 /* Masked underflow - Push QNaN. */
7179 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7180 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7181 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7182 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7183 pFpuCtx->FTW |= RT_BIT(iNewTop);
7184 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7185 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7186 iemFpuRotateStackPush(pFpuCtx);
7187 }
7188 else
7189 {
7190 /* Exception pending - don't change TOP or the register stack. */
7191 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7192 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7193 }
7194}
7195
7196
7197/**
7198 * Worker routine for raising an FPU stack overflow exception on a push.
7199 *
7200 * @param pFpuCtx The FPU context.
7201 */
7202IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7203{
7204 if (pFpuCtx->FCW & X86_FCW_IM)
7205 {
7206 /* Masked overflow. */
7207 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7208 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7209 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7210 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7211 pFpuCtx->FTW |= RT_BIT(iNewTop);
7212 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7213 iemFpuRotateStackPush(pFpuCtx);
7214 }
7215 else
7216 {
7217 /* Exception pending - don't change TOP or the register stack. */
7218 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7219 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7220 }
7221}
7222
7223
7224/**
7225 * Raises a FPU stack overflow exception on a push.
7226 *
7227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7228 */
7229DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7230{
7231 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7232 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7233 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7234 iemFpuStackPushOverflowOnly(pFpuCtx);
7235}
7236
7237
7238/**
7239 * Raises a FPU stack overflow exception on a push with a memory operand.
7240 *
7241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7242 * @param iEffSeg The effective memory operand selector register.
7243 * @param GCPtrEff The effective memory operand offset.
7244 */
7245DECL_NO_INLINE(IEM_STATIC, void)
7246iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7247{
7248 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7249 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7250 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7251 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7252 iemFpuStackPushOverflowOnly(pFpuCtx);
7253}
7254
7255
7256IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7257{
7258 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7259 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7260 if (pFpuCtx->FTW & RT_BIT(iReg))
7261 return VINF_SUCCESS;
7262 return VERR_NOT_FOUND;
7263}
7264
7265
7266IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7267{
7268 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7269 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7270 if (pFpuCtx->FTW & RT_BIT(iReg))
7271 {
7272 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7273 return VINF_SUCCESS;
7274 }
7275 return VERR_NOT_FOUND;
7276}
7277
7278
7279IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7280 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7281{
7282 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7283 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7284 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7285 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7286 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7287 {
7288 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7289 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7290 return VINF_SUCCESS;
7291 }
7292 return VERR_NOT_FOUND;
7293}
7294
7295
7296IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7297{
7298 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7299 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7300 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7301 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7302 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7303 {
7304 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7305 return VINF_SUCCESS;
7306 }
7307 return VERR_NOT_FOUND;
7308}
7309
7310
7311/**
7312 * Updates the FPU exception status after FCW is changed.
7313 *
7314 * @param pFpuCtx The FPU context.
7315 */
7316IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7317{
7318 uint16_t u16Fsw = pFpuCtx->FSW;
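    /* ES and B are summary bits: set them when any exception flag is pending that is not masked in FCW, clear them otherwise. */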
7319 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7320 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7321 else
7322 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7323 pFpuCtx->FSW = u16Fsw;
7324}
7325
7326
7327/**
7328 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7329 *
7330 * @returns The full FTW.
7331 * @param pFpuCtx The FPU context.
7332 */
7333IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7334{
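    /* The full tag word uses two bits per register: 00=valid, 01=zero, 10=special (NaN, infinity, denormal, unnormal), 11=empty. */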
7335 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7336 uint16_t u16Ftw = 0;
7337 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7338 for (unsigned iSt = 0; iSt < 8; iSt++)
7339 {
7340 unsigned const iReg = (iSt + iTop) & 7;
7341 if (!(u8Ftw & RT_BIT(iReg)))
7342 u16Ftw |= 3 << (iReg * 2); /* empty */
7343 else
7344 {
7345 uint16_t uTag;
7346 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7347 if (pr80Reg->s.uExponent == 0x7fff)
7348 uTag = 2; /* Exponent is all 1's => Special. */
7349 else if (pr80Reg->s.uExponent == 0x0000)
7350 {
7351 if (pr80Reg->s.u64Mantissa == 0x0000)
7352 uTag = 1; /* All bits are zero => Zero. */
7353 else
7354 uTag = 2; /* Must be special. */
7355 }
7356 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7357 uTag = 0; /* Valid. */
7358 else
7359 uTag = 2; /* Must be special. */
7360
7361 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7362 }
7363 }
7364
7365 return u16Ftw;
7366}
7367
7368
7369/**
7370 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7371 *
7372 * @returns The compressed FTW.
7373 * @param u16FullFtw The full FTW to convert.
7374 */
7375IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7376{
7377 uint8_t u8Ftw = 0;
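    /* The compressed (fxsave-style) tag word keeps one bit per register: 1 = occupied, 0 = empty. */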
7378 for (unsigned i = 0; i < 8; i++)
7379 {
7380 if ((u16FullFtw & 3) != 3 /*empty*/)
7381 u8Ftw |= RT_BIT(i);
7382 u16FullFtw >>= 2;
7383 }
7384
7385 return u8Ftw;
7386}
7387
7388/** @} */
7389
7390
7391/** @name Memory access.
7392 *
7393 * @{
7394 */
7395
7396
7397/**
7398 * Updates the IEMCPU::cbWritten counter if applicable.
7399 *
7400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7401 * @param fAccess The access being accounted for.
7402 * @param cbMem The access size.
7403 */
7404DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7405{
7406 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7407 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7408 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7409}
7410
7411
7412/**
7413 * Checks if the given segment can be written to, raising the appropriate
7414 * exception if not.
7415 *
7416 * @returns VBox strict status code.
7417 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param pHid Pointer to the hidden register.
7420 * @param iSegReg The register number.
7421 * @param pu64BaseAddr Where to return the base address to use for the
7422 * segment. (In 64-bit code it may differ from the
7423 * base in the hidden segment.)
7424 */
7425IEM_STATIC VBOXSTRICTRC
7426iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7427{
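    /* In 64-bit mode only FS and GS contribute a base address; CS, DS, ES and SS are treated as flat with base 0. */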
7428 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7429 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7430 else
7431 {
7432 if (!pHid->Attr.n.u1Present)
7433 {
7434 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7435 AssertRelease(uSel == 0);
7436 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7437 return iemRaiseGeneralProtectionFault0(pVCpu);
7438 }
7439
7440 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7441 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7442 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7443 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7444 *pu64BaseAddr = pHid->u64Base;
7445 }
7446 return VINF_SUCCESS;
7447}
7448
7449
7450/**
7451 * Checks if the given segment can be read from, raising the appropriate
7452 * exception if not.
7453 *
7454 * @returns VBox strict status code.
7455 *
7456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7457 * @param pHid Pointer to the hidden register.
7458 * @param iSegReg The register number.
7459 * @param pu64BaseAddr Where to return the base address to use for the
7460 * segment. (In 64-bit code it may differ from the
7461 * base in the hidden segment.)
7462 */
7463IEM_STATIC VBOXSTRICTRC
7464iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7465{
7466 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7467 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7468 else
7469 {
7470 if (!pHid->Attr.n.u1Present)
7471 {
7472 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7473 AssertRelease(uSel == 0);
7474 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7475 return iemRaiseGeneralProtectionFault0(pVCpu);
7476 }
7477
7478 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7479 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7480 *pu64BaseAddr = pHid->u64Base;
7481 }
7482 return VINF_SUCCESS;
7483}
7484
7485
7486/**
7487 * Applies the segment limit, base and attributes.
7488 *
7489 * This may raise a \#GP or \#SS.
7490 *
7491 * @returns VBox strict status code.
7492 *
7493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7494 * @param fAccess The kind of access which is being performed.
7495 * @param iSegReg The index of the segment register to apply.
7496 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7497 * TSS, ++).
7498 * @param cbMem The access size.
7499 * @param pGCPtrMem Pointer to the guest memory address to apply
7500 * segmentation to. Input and output parameter.
7501 */
7502IEM_STATIC VBOXSTRICTRC
7503iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7504{
7505 if (iSegReg == UINT8_MAX)
7506 return VINF_SUCCESS;
7507
7508 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7509 switch (pVCpu->iem.s.enmCpuMode)
7510 {
7511 case IEMMODE_16BIT:
7512 case IEMMODE_32BIT:
7513 {
7514 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7515 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7516
7517 if ( pSel->Attr.n.u1Present
7518 && !pSel->Attr.n.u1Unusable)
7519 {
7520 Assert(pSel->Attr.n.u1DescType);
7521 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7522 {
7523 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7524 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7525 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7526
7527 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7528 {
7529 /** @todo CPL check. */
7530 }
7531
7532 /*
7533 * There are two kinds of data selectors, normal and expand down.
7534 */
7535 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7536 {
7537 if ( GCPtrFirst32 > pSel->u32Limit
7538 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7539 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7540 }
7541 else
7542 {
7543 /*
7544 * The upper boundary is defined by the B bit, not the G bit!
7545 */
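    /* Valid offsets for an expand-down segment run from limit+1 up to 0xffff (B=0) or 0xffffffff (B=1). */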
7546 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7547 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7548 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7549 }
7550 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7551 }
7552 else
7553 {
7554
7555 /*
7556 * A code selector can usually be read through; writing is
7557 * only permitted in real and V8086 mode.
7558 */
7559 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7560 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7561 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7562 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7563 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7564
7565 if ( GCPtrFirst32 > pSel->u32Limit
7566 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7567 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7568
7569 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7570 {
7571 /** @todo CPL check. */
7572 }
7573
7574 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7575 }
7576 }
7577 else
7578 return iemRaiseGeneralProtectionFault0(pVCpu);
7579 return VINF_SUCCESS;
7580 }
7581
7582 case IEMMODE_64BIT:
7583 {
7584 RTGCPTR GCPtrMem = *pGCPtrMem;
7585 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7586 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7587
7588 Assert(cbMem >= 1);
7589 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7590 return VINF_SUCCESS;
7591 return iemRaiseGeneralProtectionFault0(pVCpu);
7592 }
7593
7594 default:
7595 AssertFailedReturn(VERR_IEM_IPE_7);
7596 }
7597}
7598
7599
7600/**
7601 * Translates a virtual address to a physical address and checks if we
7602 * can access the page as specified.
7603 *
7604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7605 * @param GCPtrMem The virtual address.
7606 * @param fAccess The intended access.
7607 * @param pGCPhysMem Where to return the physical address.
7608 */
7609IEM_STATIC VBOXSTRICTRC
7610iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7611{
7612 /** @todo Need a different PGM interface here. We're currently using
7613 * generic / REM interfaces. this won't cut it for R0 & RC. */
7614 RTGCPHYS GCPhys;
7615 uint64_t fFlags;
7616 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7617 if (RT_FAILURE(rc))
7618 {
7619 /** @todo Check unassigned memory in unpaged mode. */
7620 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7621 *pGCPhysMem = NIL_RTGCPHYS;
7622 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7623 }
7624
7625 /* If the page is writable and does not have the no-exec bit set, all
7626 access is allowed. Otherwise we'll have to check more carefully... */
7627 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7628 {
7629 /* Write to read only memory? */
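    /* Supervisor (CPL < 3) writes to read-only pages are allowed when CR0.WP is clear. */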
7630 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7631 && !(fFlags & X86_PTE_RW)
7632 && ( pVCpu->iem.s.uCpl == 3
7633 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7634 {
7635 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7636 *pGCPhysMem = NIL_RTGCPHYS;
7637 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7638 }
7639
7640 /* Kernel memory accessed by userland? */
7641 if ( !(fFlags & X86_PTE_US)
7642 && pVCpu->iem.s.uCpl == 3
7643 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7644 {
7645 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7646 *pGCPhysMem = NIL_RTGCPHYS;
7647 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7648 }
7649
7650 /* Executing non-executable memory? */
7651 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7652 && (fFlags & X86_PTE_PAE_NX)
7653 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7654 {
7655 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7656 *pGCPhysMem = NIL_RTGCPHYS;
7657 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7658 VERR_ACCESS_DENIED);
7659 }
7660 }
7661
7662 /*
7663 * Set the dirty / access flags.
7664 * ASSUMES this is set when the address is translated rather than on commit...
7665 */
7666 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7667 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7668 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7669 {
7670 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7671 AssertRC(rc2);
7672 }
7673
7674 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7675 *pGCPhysMem = GCPhys;
7676 return VINF_SUCCESS;
7677}
7678
7679
7680
7681/**
7682 * Maps a physical page.
7683 *
7684 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7686 * @param GCPhysMem The physical address.
7687 * @param fAccess The intended access.
7688 * @param ppvMem Where to return the mapping address.
7689 * @param pLock The PGM lock.
7690 */
7691IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7692{
7693#ifdef IEM_VERIFICATION_MODE_FULL
7694 /* Force the alternative path so we can ignore writes. */
7695 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7696 {
7697 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7698 {
7699 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7700 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7701 if (RT_FAILURE(rc2))
7702 pVCpu->iem.s.fProblematicMemory = true;
7703 }
7704 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7705 }
7706#endif
7707#ifdef IEM_LOG_MEMORY_WRITES
7708 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7709 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7710#endif
7711#ifdef IEM_VERIFICATION_MODE_MINIMAL
7712 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7713#endif
7714
7715 /** @todo This API may require some improving later. A private deal with PGM
7716 * regarding locking and unlocking needs to be struck. A couple of TLBs
7717 * living in PGM, but with publicly accessible inlined access methods
7718 * could perhaps be an even better solution. */
7719 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7720 GCPhysMem,
7721 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7722 pVCpu->iem.s.fBypassHandlers,
7723 ppvMem,
7724 pLock);
7725 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7726 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7727
7728#ifdef IEM_VERIFICATION_MODE_FULL
7729 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7730 pVCpu->iem.s.fProblematicMemory = true;
7731#endif
7732 return rc;
7733}
7734
7735
7736/**
7737 * Unmap a page previously mapped by iemMemPageMap.
7738 *
7739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7740 * @param GCPhysMem The physical address.
7741 * @param fAccess The intended access.
7742 * @param pvMem What iemMemPageMap returned.
7743 * @param pLock The PGM lock.
7744 */
7745DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7746{
7747 NOREF(pVCpu);
7748 NOREF(GCPhysMem);
7749 NOREF(fAccess);
7750 NOREF(pvMem);
7751 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7752}
7753
7754
7755/**
7756 * Looks up a memory mapping entry.
7757 *
7758 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7760 * @param pvMem The memory address.
7761 * @param fAccess The access type and purpose of the mapping to look up.
7762 */
7763DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7764{
7765 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7766 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7767 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7768 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7769 return 0;
7770 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7771 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7772 return 1;
7773 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7774 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7775 return 2;
7776 return VERR_NOT_FOUND;
7777}
7778
7779
7780/**
7781 * Finds a free memmap entry when using iNextMapping doesn't work.
7782 *
7783 * @returns Memory mapping index, 1024 on failure.
7784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7785 */
7786IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7787{
7788 /*
7789 * The easy case.
7790 */
7791 if (pVCpu->iem.s.cActiveMappings == 0)
7792 {
7793 pVCpu->iem.s.iNextMapping = 1;
7794 return 0;
7795 }
7796
7797 /* There should be enough mappings for all instructions. */
7798 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7799
7800 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7801 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7802 return i;
7803
7804 AssertFailedReturn(1024);
7805}
7806
7807
7808/**
7809 * Commits a bounce buffer that needs writing back and unmaps it.
7810 *
7811 * @returns Strict VBox status code.
7812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7813 * @param iMemMap The index of the buffer to commit.
7814 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
7815 * Always false in ring-3, obviously.
7816 */
7817IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7818{
7819 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7820 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7821#ifdef IN_RING3
7822 Assert(!fPostponeFail);
7823 RT_NOREF_PV(fPostponeFail);
7824#endif
7825
7826 /*
7827 * Do the writing.
7828 */
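    /* The bounce buffer may back a write that straddles a page boundary: the first cbFirst bytes go to GCPhysFirst and any remainder (cbSecond) to GCPhysSecond. */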
7829#ifndef IEM_VERIFICATION_MODE_MINIMAL
7830 PVM pVM = pVCpu->CTX_SUFF(pVM);
7831 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7832 && !IEM_VERIFICATION_ENABLED(pVCpu))
7833 {
7834 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7835 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7836 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7837 if (!pVCpu->iem.s.fBypassHandlers)
7838 {
7839 /*
7840 * Carefully and efficiently dealing with access handler return
7841 * codes makes this a little bloated.
7842 */
7843 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7844 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7845 pbBuf,
7846 cbFirst,
7847 PGMACCESSORIGIN_IEM);
7848 if (rcStrict == VINF_SUCCESS)
7849 {
7850 if (cbSecond)
7851 {
7852 rcStrict = PGMPhysWrite(pVM,
7853 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7854 pbBuf + cbFirst,
7855 cbSecond,
7856 PGMACCESSORIGIN_IEM);
7857 if (rcStrict == VINF_SUCCESS)
7858 { /* nothing */ }
7859 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7860 {
7861 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7864 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7865 }
7866# ifndef IN_RING3
7867 else if (fPostponeFail)
7868 {
7869 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7870 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7872 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7873 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7874 return iemSetPassUpStatus(pVCpu, rcStrict);
7875 }
7876# endif
7877 else
7878 {
7879 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7880 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7881 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7882 return rcStrict;
7883 }
7884 }
7885 }
7886 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7887 {
7888 if (!cbSecond)
7889 {
7890 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7891 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7892 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7893 }
7894 else
7895 {
7896 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7897 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7898 pbBuf + cbFirst,
7899 cbSecond,
7900 PGMACCESSORIGIN_IEM);
7901 if (rcStrict2 == VINF_SUCCESS)
7902 {
7903 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7904 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7905 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7906 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7907 }
7908 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7909 {
7910 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7912 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7913 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7914 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7915 }
7916# ifndef IN_RING3
7917 else if (fPostponeFail)
7918 {
7919 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7920 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7922 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7923 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7924 return iemSetPassUpStatus(pVCpu, rcStrict);
7925 }
7926# endif
7927 else
7928 {
7929 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7930 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7932 return rcStrict2;
7933 }
7934 }
7935 }
7936# ifndef IN_RING3
7937 else if (fPostponeFail)
7938 {
7939 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7940 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7941 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7942 if (!cbSecond)
7943 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7944 else
7945 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7946 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7947 return iemSetPassUpStatus(pVCpu, rcStrict);
7948 }
7949# endif
7950 else
7951 {
7952 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7954 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7955 return rcStrict;
7956 }
7957 }
7958 else
7959 {
7960 /*
7961 * No access handlers, much simpler.
7962 */
7963 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7964 if (RT_SUCCESS(rc))
7965 {
7966 if (cbSecond)
7967 {
7968 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7969 if (RT_SUCCESS(rc))
7970 { /* likely */ }
7971 else
7972 {
7973 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7974 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7975 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7976 return rc;
7977 }
7978 }
7979 }
7980 else
7981 {
7982 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7983 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7984 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7985 return rc;
7986 }
7987 }
7988 }
7989#endif
7990
7991#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7992 /*
7993 * Record the write(s).
7994 */
7995 if (!pVCpu->iem.s.fNoRem)
7996 {
7997 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7998 if (pEvtRec)
7999 {
8000 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8001 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8002 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8003 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8004 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8005 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8006 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8007 }
8008 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8009 {
8010 pEvtRec = iemVerifyAllocRecord(pVCpu);
8011 if (pEvtRec)
8012 {
8013 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8014 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8015 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8016 memcpy(pEvtRec->u.RamWrite.ab,
8017 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8018 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8019 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8020 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8021 }
8022 }
8023 }
8024#endif
8025#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8026 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8027 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8028 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8029 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8030 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8031 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8032
8033 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8034 g_cbIemWrote = cbWrote;
8035 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8036#endif
8037
8038 /*
8039 * Free the mapping entry.
8040 */
8041 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8042 Assert(pVCpu->iem.s.cActiveMappings != 0);
8043 pVCpu->iem.s.cActiveMappings--;
8044 return VINF_SUCCESS;
8045}
8046
8047
8048/**
8049 * iemMemMap worker that deals with a request crossing pages.
8050 */
8051IEM_STATIC VBOXSTRICTRC
8052iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8053{
8054 /*
8055 * Do the address translations.
8056 */
8057 RTGCPHYS GCPhysFirst;
8058 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8059 if (rcStrict != VINF_SUCCESS)
8060 return rcStrict;
8061
8062 RTGCPHYS GCPhysSecond;
8063 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8064 fAccess, &GCPhysSecond);
8065 if (rcStrict != VINF_SUCCESS)
8066 return rcStrict;
8067 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8068
8069 PVM pVM = pVCpu->CTX_SUFF(pVM);
8070#ifdef IEM_VERIFICATION_MODE_FULL
8071 /*
8072 * Detect problematic memory when verifying so we can select
8073 * the right execution engine. (TLB: Redo this.)
8074 */
8075 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8076 {
8077 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8078 if (RT_SUCCESS(rc2))
8079 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8080 if (RT_FAILURE(rc2))
8081 pVCpu->iem.s.fProblematicMemory = true;
8082 }
8083#endif
8084
8085
8086 /*
8087 * Read in the current memory content if it's a read, execute or partial
8088 * write access.
8089 */
8090 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8091 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8092 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
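    /* Editorial example (not from the original source): assuming the usual PAGE_SIZE of
       0x1000, a 4 byte access at a guest address whose page offset is 0xffe gives
       cbFirstPage = 0x1000 - 0xffe = 2 and cbSecondPage = cbMem - cbFirstPage = 2; the two
       halves land back to back in the bounce buffer and are later committed to
       GCPhysFirst/GCPhysSecond separately by iemMemBounceBufferCommitAndUnmap. */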
8093
8094 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8095 {
8096 if (!pVCpu->iem.s.fBypassHandlers)
8097 {
8098 /*
8099 * Must carefully deal with access handler status codes here,
8100 * makes the code a bit bloated.
8101             * which makes the code a bit bloated.
8102 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8103 if (rcStrict == VINF_SUCCESS)
8104 {
8105 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8106 if (rcStrict == VINF_SUCCESS)
8107 { /*likely */ }
8108 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8109 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8110 else
8111 {
8112                Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8113 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8114 return rcStrict;
8115 }
8116 }
8117 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8118 {
8119 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8120 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8121 {
8122 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8123 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8124 }
8125 else
8126 {
8127 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8128                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8129 return rcStrict2;
8130 }
8131 }
8132 else
8133 {
8134 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8135 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8136 return rcStrict;
8137 }
8138 }
8139 else
8140 {
8141 /*
8142             * No informational status codes here, much more straightforward.
8143 */
8144 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8145 if (RT_SUCCESS(rc))
8146 {
8147 Assert(rc == VINF_SUCCESS);
8148 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8149 if (RT_SUCCESS(rc))
8150 Assert(rc == VINF_SUCCESS);
8151 else
8152 {
8153 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8154 return rc;
8155 }
8156 }
8157 else
8158 {
8159 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8160 return rc;
8161 }
8162 }
8163
8164#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8165 if ( !pVCpu->iem.s.fNoRem
8166 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8167 {
8168 /*
8169 * Record the reads.
8170 */
8171 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8172 if (pEvtRec)
8173 {
8174 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8175 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8176 pEvtRec->u.RamRead.cb = cbFirstPage;
8177 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8178 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8179 }
8180 pEvtRec = iemVerifyAllocRecord(pVCpu);
8181 if (pEvtRec)
8182 {
8183 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8184 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8185 pEvtRec->u.RamRead.cb = cbSecondPage;
8186 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8187 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8188 }
8189 }
8190#endif
8191 }
8192#ifdef VBOX_STRICT
8193 else
8194 memset(pbBuf, 0xcc, cbMem);
8195 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8196 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8197#endif
8198
8199 /*
8200 * Commit the bounce buffer entry.
8201 */
8202 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8203 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8204 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8205 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8206 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8207 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8208 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8209 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8210 pVCpu->iem.s.cActiveMappings++;
8211
8212 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8213 *ppvMem = pbBuf;
8214 return VINF_SUCCESS;
8215}
8216
8217
8218/**
8219 * iemMemMap worker that deals with iemMemPageMap failures.
8220 */
8221IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8222 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8223{
8224 /*
8225 * Filter out conditions we can handle and the ones which shouldn't happen.
8226 */
8227 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8228 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8229 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8230 {
8231 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8232 return rcMap;
8233 }
8234 pVCpu->iem.s.cPotentialExits++;
8235
8236 /*
8237 * Read in the current memory content if it's a read, execute or partial
8238 * write access.
8239 */
8240 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8241 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8242 {
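        /* Editorial note: the 0xff fill below for unassigned physical ranges presumably
           mimics the all-ones value an open-bus read would return. */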
8243 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8244 memset(pbBuf, 0xff, cbMem);
8245 else
8246 {
8247 int rc;
8248 if (!pVCpu->iem.s.fBypassHandlers)
8249 {
8250 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8251 if (rcStrict == VINF_SUCCESS)
8252 { /* nothing */ }
8253 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8254 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8255 else
8256 {
8257 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8258 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8259 return rcStrict;
8260 }
8261 }
8262 else
8263 {
8264 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8265 if (RT_SUCCESS(rc))
8266 { /* likely */ }
8267 else
8268 {
8269 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8270 GCPhysFirst, rc));
8271 return rc;
8272 }
8273 }
8274 }
8275
8276#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8277 if ( !pVCpu->iem.s.fNoRem
8278 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8279 {
8280 /*
8281 * Record the read.
8282 */
8283 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8284 if (pEvtRec)
8285 {
8286 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8287 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8288 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8289 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8290 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8291 }
8292 }
8293#endif
8294 }
8295#ifdef VBOX_STRICT
8296 else
8297 memset(pbBuf, 0xcc, cbMem);
8300 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8301 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8302#endif
8303
8304 /*
8305 * Commit the bounce buffer entry.
8306 */
8307 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8308 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8309 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8310 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8311 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8312 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8313 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8314 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8315 pVCpu->iem.s.cActiveMappings++;
8316
8317 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8318 *ppvMem = pbBuf;
8319 return VINF_SUCCESS;
8320}
8321
8322
8323
8324/**
8325 * Maps the specified guest memory for the given kind of access.
8326 *
8327 * This may be using bounce buffering of the memory if it's crossing a page
8328 * boundary or if there is an access handler installed for any of it. Because
8329 * of lock prefix guarantees, we're in for some extra clutter when this
8330 * happens.
8331 *
8332 * This may raise a \#GP, \#SS, \#PF or \#AC.
8333 *
8334 * @returns VBox strict status code.
8335 *
8336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8337 * @param ppvMem Where to return the pointer to the mapped
8338 * memory.
8339 * @param cbMem The number of bytes to map. This is usually 1,
8340 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8341 * string operations it can be up to a page.
8342 * @param iSegReg The index of the segment register to use for
8343 * this access. The base and limits are checked.
8344 * Use UINT8_MAX to indicate that no segmentation
8345 * is required (for IDT, GDT and LDT accesses).
8346 * @param GCPtrMem The address of the guest memory.
8347 * @param fAccess How the memory is being accessed. The
8348 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8349 * how to map the memory, while the
8350 * IEM_ACCESS_WHAT_XXX bit is used when raising
8351 * exceptions.
8352 */
8353IEM_STATIC VBOXSTRICTRC
8354iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8355{
8356 /*
8357 * Check the input and figure out which mapping entry to use.
8358 */
8359 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8360 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8361 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8362
8363 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8364 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8365 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8366 {
8367 iMemMap = iemMemMapFindFree(pVCpu);
8368 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8369 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8370 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8371 pVCpu->iem.s.aMemMappings[2].fAccess),
8372 VERR_IEM_IPE_9);
8373 }
8374
8375 /*
8376 * Map the memory, checking that we can actually access it. If something
8377 * slightly complicated happens, fall back on bounce buffering.
8378 */
8379 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8380 if (rcStrict != VINF_SUCCESS)
8381 return rcStrict;
8382
8383 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8384 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8385
8386 RTGCPHYS GCPhysFirst;
8387 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8388 if (rcStrict != VINF_SUCCESS)
8389 return rcStrict;
8390
8391 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8392 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8393 if (fAccess & IEM_ACCESS_TYPE_READ)
8394 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8395
8396 void *pvMem;
8397 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8398 if (rcStrict != VINF_SUCCESS)
8399 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8400
8401 /*
8402 * Fill in the mapping table entry.
8403 */
8404 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8405 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8406 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8407 pVCpu->iem.s.cActiveMappings++;
8408
8409 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8410 *ppvMem = pvMem;
8411 return VINF_SUCCESS;
8412}
8413
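/*
 * Editorial sketch (not part of the interpreter): the typical caller pattern for
 * iemMemMap paired with iemMemCommitAndUnmap, here for a 16-bit data write.
 * GCPtrMem and u16Value stand in for whatever the instruction implementation has
 * at hand; compare iemMemStoreDataU16 further down.
 *
 *      uint16_t    *pu16Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu16Dst = u16Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 */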
8414
8415/**
8416 * Commits the guest memory if bounce buffered and unmaps it.
8417 *
8418 * @returns Strict VBox status code.
8419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8420 * @param pvMem The mapping.
8421 * @param fAccess The kind of access.
8422 */
8423IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8424{
8425 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8426 AssertReturn(iMemMap >= 0, iMemMap);
8427
8428 /* If it's bounce buffered, we may need to write back the buffer. */
8429 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8430 {
8431 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8432 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8433 }
8434 /* Otherwise unlock it. */
8435 else
8436 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8437
8438 /* Free the entry. */
8439 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8440 Assert(pVCpu->iem.s.cActiveMappings != 0);
8441 pVCpu->iem.s.cActiveMappings--;
8442 return VINF_SUCCESS;
8443}
8444
8445#ifdef IEM_WITH_SETJMP
8446
8447/**
8448 * Maps the specified guest memory for the given kind of access, longjmp on
8449 * error.
8450 *
8451 * This may be using bounce buffering of the memory if it's crossing a page
8452 * boundary or if there is an access handler installed for any of it. Because
8453 * of lock prefix guarantees, we're in for some extra clutter when this
8454 * happens.
8455 *
8456 * This may raise a \#GP, \#SS, \#PF or \#AC.
8457 *
8458 * @returns Pointer to the mapped memory.
8459 *
8460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8461 * @param cbMem The number of bytes to map. This is usually 1,
8462 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8463 * string operations it can be up to a page.
8464 * @param iSegReg The index of the segment register to use for
8465 * this access. The base and limits are checked.
8466 * Use UINT8_MAX to indicate that no segmentation
8467 * is required (for IDT, GDT and LDT accesses).
8468 * @param GCPtrMem The address of the guest memory.
8469 * @param fAccess How the memory is being accessed. The
8470 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8471 * how to map the memory, while the
8472 * IEM_ACCESS_WHAT_XXX bit is used when raising
8473 * exceptions.
8474 */
8475IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8476{
8477 /*
8478 * Check the input and figure out which mapping entry to use.
8479 */
8480 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8481 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8482 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8483
8484 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8485 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8486 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8487 {
8488 iMemMap = iemMemMapFindFree(pVCpu);
8489 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8490 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8491 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8492 pVCpu->iem.s.aMemMappings[2].fAccess),
8493 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8494 }
8495
8496 /*
8497 * Map the memory, checking that we can actually access it. If something
8498 * slightly complicated happens, fall back on bounce buffering.
8499 */
8500 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8501 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8502 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8503
8504 /* Crossing a page boundary? */
8505 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8506 { /* No (likely). */ }
8507 else
8508 {
8509 void *pvMem;
8510 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8511 if (rcStrict == VINF_SUCCESS)
8512 return pvMem;
8513 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8514 }
8515
8516 RTGCPHYS GCPhysFirst;
8517 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8518 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8519 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8520
8521 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8522 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8523 if (fAccess & IEM_ACCESS_TYPE_READ)
8524 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8525
8526 void *pvMem;
8527 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8528 if (rcStrict == VINF_SUCCESS)
8529 { /* likely */ }
8530 else
8531 {
8532 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8533 if (rcStrict == VINF_SUCCESS)
8534 return pvMem;
8535 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8536 }
8537
8538 /*
8539 * Fill in the mapping table entry.
8540 */
8541 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8542 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8543 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8544 pVCpu->iem.s.cActiveMappings++;
8545
8546 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8547 return pvMem;
8548}
8549
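/*
 * Editorial sketch: the setjmp/longjmp flavour drops the status code plumbing, so
 * a read caller shrinks to something like the fragment below (compare
 * iemMemFetchDataU16Jmp further down); any failure longjmps to the top-level
 * setjmp frame instead of returning.
 *
 *      uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src),
 *                                                               X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R);
 *      uint16_t const  u16Value = *pu16Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 */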
8550
8551/**
8552 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8553 *
8554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8555 * @param pvMem The mapping.
8556 * @param fAccess The kind of access.
8557 */
8558IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8559{
8560 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8561 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8562
8563 /* If it's bounce buffered, we may need to write back the buffer. */
8564 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8565 {
8566 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8567 {
8568 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8569 if (rcStrict == VINF_SUCCESS)
8570 return;
8571 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8572 }
8573 }
8574 /* Otherwise unlock it. */
8575 else
8576 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8577
8578 /* Free the entry. */
8579 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8580 Assert(pVCpu->iem.s.cActiveMappings != 0);
8581 pVCpu->iem.s.cActiveMappings--;
8582}
8583
8584#endif
8585
8586#ifndef IN_RING3
8587/**
8588 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8589 * buffer part runs into trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM and the pending-write flags).
8590 *
8591 * Allows the instruction to be completed and retired, while the IEM user will
8592 * return to ring-3 immediately afterwards and do the postponed writes there.
8593 *
8594 * @returns VBox status code (no strict statuses). Caller must check
8595 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8597 * @param pvMem The mapping.
8598 * @param fAccess The kind of access.
8599 */
8600IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8601{
8602 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8603 AssertReturn(iMemMap >= 0, iMemMap);
8604
8605 /* If it's bounce buffered, we may need to write back the buffer. */
8606 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8607 {
8608 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8609 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8610 }
8611 /* Otherwise unlock it. */
8612 else
8613 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8614
8615 /* Free the entry. */
8616 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8617 Assert(pVCpu->iem.s.cActiveMappings != 0);
8618 pVCpu->iem.s.cActiveMappings--;
8619 return VINF_SUCCESS;
8620}
8621#endif
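/*
 * Editorial sketch of how a ring-0/raw-mode caller is expected to use the
 * postpone variant above: after the instruction has been retired, VMCPU_FF_IEM
 * is checked and, if set, execution returns to ring-3 so the pending bounce
 * buffer writes can be flushed there before e.g. the next string iteration.
 * The fStopRepeating local is made up for illustration.
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *      if (   rcStrict == VINF_SUCCESS
 *          && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          fStopRepeating = true;
 */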
8622
8623
8624/**
8625 * Rolls back mappings, releasing page locks and such.
8626 *
8627 * The caller shall only call this after checking cActiveMappings.
8628 *
8630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8631 */
8632IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8633{
8634 Assert(pVCpu->iem.s.cActiveMappings > 0);
8635
8636 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8637 while (iMemMap-- > 0)
8638 {
8639 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8640 if (fAccess != IEM_ACCESS_INVALID)
8641 {
8642 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8643 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8644 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8645 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8646 Assert(pVCpu->iem.s.cActiveMappings > 0);
8647 pVCpu->iem.s.cActiveMappings--;
8648 }
8649 }
8650}
8651
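/*
 * Editorial note: per the requirement above, the expected caller pattern after a
 * failed or aborted instruction is along these lines (illustrative only):
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */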
8652
8653/**
8654 * Fetches a data byte.
8655 *
8656 * @returns Strict VBox status code.
8657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8658 * @param pu8Dst Where to return the byte.
8659 * @param iSegReg The index of the segment register to use for
8660 * this access. The base and limits are checked.
8661 * @param GCPtrMem The address of the guest memory.
8662 */
8663IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8664{
8665 /* The lazy approach for now... */
8666 uint8_t const *pu8Src;
8667 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8668 if (rc == VINF_SUCCESS)
8669 {
8670 *pu8Dst = *pu8Src;
8671 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8672 }
8673 return rc;
8674}
8675
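/*
 * Editorial sketch: an instruction implementation typically uses these fetch
 * helpers along the lines below; GCPtrEffSrc is a made-up local holding an
 * already decoded effective address.
 *
 *      uint8_t      u8Src;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU8(pVCpu, &u8Src, X86_SREG_DS, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */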
8676
8677#ifdef IEM_WITH_SETJMP
8678/**
8679 * Fetches a data byte, longjmp on error.
8680 *
8681 * @returns The byte.
8682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8683 * @param iSegReg The index of the segment register to use for
8684 * this access. The base and limits are checked.
8685 * @param GCPtrMem The address of the guest memory.
8686 */
8687DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8688{
8689 /* The lazy approach for now... */
8690 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8691 uint8_t const bRet = *pu8Src;
8692 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8693 return bRet;
8694}
8695#endif /* IEM_WITH_SETJMP */
8696
8697
8698/**
8699 * Fetches a data word.
8700 *
8701 * @returns Strict VBox status code.
8702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8703 * @param pu16Dst Where to return the word.
8704 * @param iSegReg The index of the segment register to use for
8705 * this access. The base and limits are checked.
8706 * @param GCPtrMem The address of the guest memory.
8707 */
8708IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8709{
8710 /* The lazy approach for now... */
8711 uint16_t const *pu16Src;
8712 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8713 if (rc == VINF_SUCCESS)
8714 {
8715 *pu16Dst = *pu16Src;
8716 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8717 }
8718 return rc;
8719}
8720
8721
8722#ifdef IEM_WITH_SETJMP
8723/**
8724 * Fetches a data word, longjmp on error.
8725 *
8726 * @returns The word
8727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8728 * @param iSegReg The index of the segment register to use for
8729 * this access. The base and limits are checked.
8730 * @param GCPtrMem The address of the guest memory.
8731 */
8732DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8733{
8734 /* The lazy approach for now... */
8735 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8736 uint16_t const u16Ret = *pu16Src;
8737 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8738 return u16Ret;
8739}
8740#endif
8741
8742
8743/**
8744 * Fetches a data dword.
8745 *
8746 * @returns Strict VBox status code.
8747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8748 * @param pu32Dst Where to return the dword.
8749 * @param iSegReg The index of the segment register to use for
8750 * this access. The base and limits are checked.
8751 * @param GCPtrMem The address of the guest memory.
8752 */
8753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8754{
8755 /* The lazy approach for now... */
8756 uint32_t const *pu32Src;
8757 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8758 if (rc == VINF_SUCCESS)
8759 {
8760 *pu32Dst = *pu32Src;
8761 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8762 }
8763 return rc;
8764}
8765
8766
8767#ifdef IEM_WITH_SETJMP
8768
8769IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8770{
8771 Assert(cbMem >= 1);
8772 Assert(iSegReg < X86_SREG_COUNT);
8773
8774 /*
8775 * 64-bit mode is simpler.
8776 */
8777 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8778 {
8779 if (iSegReg >= X86_SREG_FS)
8780 {
8781 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8782 GCPtrMem += pSel->u64Base;
8783 }
8784
8785 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8786 return GCPtrMem;
8787 }
8788 /*
8789 * 16-bit and 32-bit segmentation.
8790 */
8791 else
8792 {
8793 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8794 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8795 == X86DESCATTR_P /* data, expand up */
8796 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8797 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8798 {
8799 /* expand up */
8800 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8801 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8802 && GCPtrLast32 > (uint32_t)GCPtrMem))
8803 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8804 }
8805 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8806 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8807 {
8808 /* expand down */
8809 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8810 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8811 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8812 && GCPtrLast32 > (uint32_t)GCPtrMem))
8813 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8814 }
8815 else
8816 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8817 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8818 }
8819 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8820}
8821
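/*
 * Editorial example for the expand-down case above: with a 32-bit expand-down
 * data segment where u32Limit is 0x0000ffff and Attr.n.u1DefBig is set, the
 * architecturally valid offsets are 0x00010000 through 0xffffffff, i.e. the
 * access must lie strictly above the limit and must not wrap; anything at or
 * below the limit raises a bounds fault.
 */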
8822
8823IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8824{
8825 Assert(cbMem >= 1);
8826 Assert(iSegReg < X86_SREG_COUNT);
8827
8828 /*
8829 * 64-bit mode is simpler.
8830 */
8831 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8832 {
8833 if (iSegReg >= X86_SREG_FS)
8834 {
8835 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8836 GCPtrMem += pSel->u64Base;
8837 }
8838
8839 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8840 return GCPtrMem;
8841 }
8842 /*
8843 * 16-bit and 32-bit segmentation.
8844 */
8845 else
8846 {
8847 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8848 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8849 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8850 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8851 {
8852 /* expand up */
8853 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8854 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8855 && GCPtrLast32 > (uint32_t)GCPtrMem))
8856 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8857 }
8858        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8859 {
8860 /* expand down */
8861 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8862 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8863 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8864 && GCPtrLast32 > (uint32_t)GCPtrMem))
8865 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8866 }
8867 else
8868 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8869 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8870 }
8871 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8872}
8873
8874
8875/**
8876 * Fetches a data dword, longjmp on error, fallback/safe version.
8877 *
8878 * @returns The dword
8879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8880 * @param iSegReg The index of the segment register to use for
8881 * this access. The base and limits are checked.
8882 * @param GCPtrMem The address of the guest memory.
8883 */
8884IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8885{
8886 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8887 uint32_t const u32Ret = *pu32Src;
8888 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8889 return u32Ret;
8890}
8891
8892
8893/**
8894 * Fetches a data dword, longjmp on error.
8895 *
8896 * @returns The dword
8897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8898 * @param iSegReg The index of the segment register to use for
8899 * this access. The base and limits are checked.
8900 * @param GCPtrMem The address of the guest memory.
8901 */
8902DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8903{
8904# ifdef IEM_WITH_DATA_TLB
8905 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8906 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8907 {
8908 /// @todo more later.
8909 }
8910
8911 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8912# else
8913 /* The lazy approach. */
8914 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8915 uint32_t const u32Ret = *pu32Src;
8916 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8917 return u32Ret;
8918# endif
8919}
8920#endif
8921
8922
8923#ifdef SOME_UNUSED_FUNCTION
8924/**
8925 * Fetches a data dword and sign extends it to a qword.
8926 *
8927 * @returns Strict VBox status code.
8928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8929 * @param pu64Dst Where to return the sign extended value.
8930 * @param iSegReg The index of the segment register to use for
8931 * this access. The base and limits are checked.
8932 * @param GCPtrMem The address of the guest memory.
8933 */
8934IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8935{
8936 /* The lazy approach for now... */
8937 int32_t const *pi32Src;
8938 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8939 if (rc == VINF_SUCCESS)
8940 {
8941 *pu64Dst = *pi32Src;
8942 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8943 }
8944#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8945 else
8946 *pu64Dst = 0;
8947#endif
8948 return rc;
8949}
8950#endif
8951
8952
8953/**
8954 * Fetches a data qword.
8955 *
8956 * @returns Strict VBox status code.
8957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8958 * @param pu64Dst Where to return the qword.
8959 * @param iSegReg The index of the segment register to use for
8960 * this access. The base and limits are checked.
8961 * @param GCPtrMem The address of the guest memory.
8962 */
8963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8964{
8965 /* The lazy approach for now... */
8966 uint64_t const *pu64Src;
8967 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8968 if (rc == VINF_SUCCESS)
8969 {
8970 *pu64Dst = *pu64Src;
8971 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8972 }
8973 return rc;
8974}
8975
8976
8977#ifdef IEM_WITH_SETJMP
8978/**
8979 * Fetches a data qword, longjmp on error.
8980 *
8981 * @returns The qword.
8982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8983 * @param iSegReg The index of the segment register to use for
8984 * this access. The base and limits are checked.
8985 * @param GCPtrMem The address of the guest memory.
8986 */
8987DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8988{
8989 /* The lazy approach for now... */
8990 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8991 uint64_t const u64Ret = *pu64Src;
8992 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8993 return u64Ret;
8994}
8995#endif
8996
8997
8998/**
8999 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9000 *
9001 * @returns Strict VBox status code.
9002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9003 * @param pu64Dst Where to return the qword.
9004 * @param iSegReg The index of the segment register to use for
9005 * this access. The base and limits are checked.
9006 * @param GCPtrMem The address of the guest memory.
9007 */
9008IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9009{
9010 /* The lazy approach for now... */
9011 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9012 if (RT_UNLIKELY(GCPtrMem & 15))
9013 return iemRaiseGeneralProtectionFault0(pVCpu);
9014
9015 uint64_t const *pu64Src;
9016 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9017 if (rc == VINF_SUCCESS)
9018 {
9019 *pu64Dst = *pu64Src;
9020 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9021 }
9022 return rc;
9023}
9024
9025
9026#ifdef IEM_WITH_SETJMP
9027/**
9028 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9029 *
9030 * @returns The qword.
9031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9032 * @param iSegReg The index of the segment register to use for
9033 * this access. The base and limits are checked.
9034 * @param GCPtrMem The address of the guest memory.
9035 */
9036DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9037{
9038 /* The lazy approach for now... */
9039 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9040 if (RT_LIKELY(!(GCPtrMem & 15)))
9041 {
9042 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9043 uint64_t const u64Ret = *pu64Src;
9044 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9045 return u64Ret;
9046 }
9047
9048 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9049 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9050}
9051#endif
9052
9053
9054/**
9055 * Fetches a data tword.
9056 *
9057 * @returns Strict VBox status code.
9058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9059 * @param pr80Dst Where to return the tword.
9060 * @param iSegReg The index of the segment register to use for
9061 * this access. The base and limits are checked.
9062 * @param GCPtrMem The address of the guest memory.
9063 */
9064IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9065{
9066 /* The lazy approach for now... */
9067 PCRTFLOAT80U pr80Src;
9068 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9069 if (rc == VINF_SUCCESS)
9070 {
9071 *pr80Dst = *pr80Src;
9072 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9073 }
9074 return rc;
9075}
9076
9077
9078#ifdef IEM_WITH_SETJMP
9079/**
9080 * Fetches a data tword, longjmp on error.
9081 *
9082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9083 * @param pr80Dst Where to return the tword.
9084 * @param iSegReg The index of the segment register to use for
9085 * this access. The base and limits are checked.
9086 * @param GCPtrMem The address of the guest memory.
9087 */
9088DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9089{
9090 /* The lazy approach for now... */
9091 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9092 *pr80Dst = *pr80Src;
9093 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9094}
9095#endif
9096
9097
9098/**
9099 * Fetches a data dqword (double qword), generally SSE related.
9100 *
9101 * @returns Strict VBox status code.
9102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9103 * @param pu128Dst Where to return the dqword.
9104 * @param iSegReg The index of the segment register to use for
9105 * this access. The base and limits are checked.
9106 * @param GCPtrMem The address of the guest memory.
9107 */
9108IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9109{
9110 /* The lazy approach for now... */
9111 uint128_t const *pu128Src;
9112 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9113 if (rc == VINF_SUCCESS)
9114 {
9115 *pu128Dst = *pu128Src;
9116 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9117 }
9118 return rc;
9119}
9120
9121
9122#ifdef IEM_WITH_SETJMP
9123/**
9124 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9125 *
9126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9127 * @param pu128Dst Where to return the dqword.
9128 * @param iSegReg The index of the segment register to use for
9129 * this access. The base and limits are checked.
9130 * @param GCPtrMem The address of the guest memory.
9131 */
9132IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9133{
9134 /* The lazy approach for now... */
9135 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9136 *pu128Dst = *pu128Src;
9137 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9138}
9139#endif
9140
9141
9142/**
9143 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9144 * related.
9145 *
9146 * Raises \#GP(0) if not aligned.
9147 *
9148 * @returns Strict VBox status code.
9149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9150 * @param pu128Dst Where to return the dqword.
9151 * @param iSegReg The index of the segment register to use for
9152 * this access. The base and limits are checked.
9153 * @param GCPtrMem The address of the guest memory.
9154 */
9155IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9156{
9157 /* The lazy approach for now... */
9158 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
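    /* Editorial example: a 16 byte SSE operand at an address with (GCPtrMem & 15) == 8
       raises #GP(0) here unless the MXCSR.MM bit is set; as the todo above notes, the
       check is currently applied to the unsegmented offset. */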
9159 if ( (GCPtrMem & 15)
9160 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9161 return iemRaiseGeneralProtectionFault0(pVCpu);
9162
9163 uint128_t const *pu128Src;
9164 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9165 if (rc == VINF_SUCCESS)
9166 {
9167 *pu128Dst = *pu128Src;
9168 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9169 }
9170 return rc;
9171}
9172
9173
9174#ifdef IEM_WITH_SETJMP
9175/**
9176 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9177 * related, longjmp on error.
9178 *
9179 * Raises \#GP(0) if not aligned.
9180 *
9181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9182 * @param pu128Dst Where to return the dqword.
9183 * @param iSegReg The index of the segment register to use for
9184 * this access. The base and limits are checked.
9185 * @param GCPtrMem The address of the guest memory.
9186 */
9187DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9188{
9189 /* The lazy approach for now... */
9190 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9191 if ( (GCPtrMem & 15) == 0
9192 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9193 {
9194 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9195 IEM_ACCESS_DATA_R);
9196 *pu128Dst = *pu128Src;
9197 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9198 return;
9199 }
9200
9201 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9202 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9203}
9204#endif
9205
9206
9207
9208/**
9209 * Fetches a descriptor register (lgdt, lidt).
9210 *
9211 * @returns Strict VBox status code.
9212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9213 * @param pcbLimit Where to return the limit.
9214 * @param pGCPtrBase Where to return the base.
9215 * @param iSegReg The index of the segment register to use for
9216 * this access. The base and limits are checked.
9217 * @param GCPtrMem The address of the guest memory.
9218 * @param enmOpSize The effective operand size.
9219 */
9220IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9221 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9222{
9223 /*
9224 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9225 * little special:
9226 * - The two reads are done separately.
9227 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9228 * - We suspect the 386 to actually commit the limit before the base in
9229 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9230 * don't try to emulate this eccentric behavior, because it's not well
9231 * enough understood and rather hard to trigger.
9232 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9233 */
9234 VBOXSTRICTRC rcStrict;
9235 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9236 {
9237 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9238 if (rcStrict == VINF_SUCCESS)
9239 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9240 }
9241 else
9242 {
9243 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
9244 if (enmOpSize == IEMMODE_32BIT)
9245 {
9246 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9247 {
9248 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9249 if (rcStrict == VINF_SUCCESS)
9250 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9251 }
9252 else
9253 {
9254 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9255 if (rcStrict == VINF_SUCCESS)
9256 {
9257 *pcbLimit = (uint16_t)uTmp;
9258 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9259 }
9260 }
9261 if (rcStrict == VINF_SUCCESS)
9262 *pGCPtrBase = uTmp;
9263 }
9264 else
9265 {
9266 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9267 if (rcStrict == VINF_SUCCESS)
9268 {
9269 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9270 if (rcStrict == VINF_SUCCESS)
9271 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9272 }
9273 }
9274 }
9275 return rcStrict;
9276}
9277
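/*
 * Editorial example of the operand layout read above: the m16&32 / m16&64
 * operand of LGDT and LIDT puts the 16-bit limit at [GCPtrMem] and the base at
 * [GCPtrMem+2].  With a 16-bit operand size only the low 24 bits of the base
 * are used (hence the UINT32_C(0x00ffffff) mask), while in 64-bit mode the
 * base is read as a full 64-bit value.
 */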
9278
9279
9280/**
9281 * Stores a data byte.
9282 *
9283 * @returns Strict VBox status code.
9284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9285 * @param iSegReg The index of the segment register to use for
9286 * this access. The base and limits are checked.
9287 * @param GCPtrMem The address of the guest memory.
9288 * @param u8Value The value to store.
9289 */
9290IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9291{
9292 /* The lazy approach for now... */
9293 uint8_t *pu8Dst;
9294 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9295 if (rc == VINF_SUCCESS)
9296 {
9297 *pu8Dst = u8Value;
9298 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9299 }
9300 return rc;
9301}
9302
9303
9304#ifdef IEM_WITH_SETJMP
9305/**
9306 * Stores a data byte, longjmp on error.
9307 *
9308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9309 * @param iSegReg The index of the segment register to use for
9310 * this access. The base and limits are checked.
9311 * @param GCPtrMem The address of the guest memory.
9312 * @param u8Value The value to store.
9313 */
9314IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9315{
9316 /* The lazy approach for now... */
9317 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9318 *pu8Dst = u8Value;
9319 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9320}
9321#endif
9322
9323
9324/**
9325 * Stores a data word.
9326 *
9327 * @returns Strict VBox status code.
9328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9329 * @param iSegReg The index of the segment register to use for
9330 * this access. The base and limits are checked.
9331 * @param GCPtrMem The address of the guest memory.
9332 * @param u16Value The value to store.
9333 */
9334IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9335{
9336 /* The lazy approach for now... */
9337 uint16_t *pu16Dst;
9338 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9339 if (rc == VINF_SUCCESS)
9340 {
9341 *pu16Dst = u16Value;
9342 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9343 }
9344 return rc;
9345}
9346
9347
9348#ifdef IEM_WITH_SETJMP
9349/**
9350 * Stores a data word, longjmp on error.
9351 *
9352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9353 * @param iSegReg The index of the segment register to use for
9354 * this access. The base and limits are checked.
9355 * @param GCPtrMem The address of the guest memory.
9356 * @param u16Value The value to store.
9357 */
9358IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9359{
9360 /* The lazy approach for now... */
9361 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9362 *pu16Dst = u16Value;
9363 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9364}
9365#endif
9366
9367
9368/**
9369 * Stores a data dword.
9370 *
9371 * @returns Strict VBox status code.
9372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9373 * @param iSegReg The index of the segment register to use for
9374 * this access. The base and limits are checked.
9375 * @param GCPtrMem The address of the guest memory.
9376 * @param u32Value The value to store.
9377 */
9378IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9379{
9380 /* The lazy approach for now... */
9381 uint32_t *pu32Dst;
9382 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9383 if (rc == VINF_SUCCESS)
9384 {
9385 *pu32Dst = u32Value;
9386 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9387 }
9388 return rc;
9389}
9390
9391
9392#ifdef IEM_WITH_SETJMP
9393/**
9394 * Stores a data dword, longjmp on error.
9395 *
9397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9398 * @param iSegReg The index of the segment register to use for
9399 * this access. The base and limits are checked.
9400 * @param GCPtrMem The address of the guest memory.
9401 * @param u32Value The value to store.
9402 */
9403IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9404{
9405 /* The lazy approach for now... */
9406 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9407 *pu32Dst = u32Value;
9408 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9409}
9410#endif
9411
9412
9413/**
9414 * Stores a data qword.
9415 *
9416 * @returns Strict VBox status code.
9417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9418 * @param iSegReg The index of the segment register to use for
9419 * this access. The base and limits are checked.
9420 * @param GCPtrMem The address of the guest memory.
9421 * @param u64Value The value to store.
9422 */
9423IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9424{
9425 /* The lazy approach for now... */
9426 uint64_t *pu64Dst;
9427 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9428 if (rc == VINF_SUCCESS)
9429 {
9430 *pu64Dst = u64Value;
9431 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9432 }
9433 return rc;
9434}
9435
9436
9437#ifdef IEM_WITH_SETJMP
9438/**
9439 * Stores a data qword, longjmp on error.
9440 *
9441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9442 * @param iSegReg The index of the segment register to use for
9443 * this access. The base and limits are checked.
9444 * @param GCPtrMem The address of the guest memory.
9445 * @param u64Value The value to store.
9446 */
9447IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9448{
9449 /* The lazy approach for now... */
9450 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9451 *pu64Dst = u64Value;
9452 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9453}
9454#endif
9455
9456
9457/**
9458 * Stores a data dqword.
9459 *
9460 * @returns Strict VBox status code.
9461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9462 * @param iSegReg The index of the segment register to use for
9463 * this access. The base and limits are checked.
9464 * @param GCPtrMem The address of the guest memory.
9465 * @param u128Value The value to store.
9466 */
9467IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9468{
9469 /* The lazy approach for now... */
9470 uint128_t *pu128Dst;
9471 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9472 if (rc == VINF_SUCCESS)
9473 {
9474 *pu128Dst = u128Value;
9475 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9476 }
9477 return rc;
9478}
9479
9480
9481#ifdef IEM_WITH_SETJMP
9482/**
9483 * Stores a data dqword, longjmp on error.
9484 *
9485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9486 * @param iSegReg The index of the segment register to use for
9487 * this access. The base and limits are checked.
9488 * @param GCPtrMem The address of the guest memory.
9489 * @param u128Value The value to store.
9490 */
9491IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9492{
9493 /* The lazy approach for now... */
9494 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9495 *pu128Dst = u128Value;
9496 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9497}
9498#endif
9499
9500
9501/**
9502 * Stores a data dqword, SSE aligned.
9503 *
9504 * @returns Strict VBox status code.
9505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9506 * @param iSegReg The index of the segment register to use for
9507 * this access. The base and limits are checked.
9508 * @param GCPtrMem The address of the guest memory.
9509 * @param u128Value The value to store.
9510 */
9511IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9512{
9513 /* The lazy approach for now... */
9514 if ( (GCPtrMem & 15)
9515 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9516 return iemRaiseGeneralProtectionFault0(pVCpu);
9517
9518 uint128_t *pu128Dst;
9519 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9520 if (rc == VINF_SUCCESS)
9521 {
9522 *pu128Dst = u128Value;
9523 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9524 }
9525 return rc;
9526}
9527
9528
9529#ifdef IEM_WITH_SETJMP
9530/**
9531 * Stores a data dqword, SSE aligned, longjmp on error.
9532 *
9534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9535 * @param iSegReg The index of the segment register to use for
9536 * this access. The base and limits are checked.
9537 * @param GCPtrMem The address of the guest memory.
9538 * @param u128Value The value to store.
9539 */
9540DECL_NO_INLINE(IEM_STATIC, void)
9541iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9542{
9543 /* The lazy approach for now... */
9544 if ( (GCPtrMem & 15) == 0
9545 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9546 {
9547 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9548 *pu128Dst = u128Value;
9549 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9550 return;
9551 }
9552
9553 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9554 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9555}
9556#endif
9557
9558
9559/**
9560 * Stores a descriptor register (sgdt, sidt).
9561 *
9562 * @returns Strict VBox status code.
9563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9564 * @param cbLimit The limit.
9565 * @param GCPtrBase The base address.
9566 * @param iSegReg The index of the segment register to use for
9567 * this access. The base and limits are checked.
9568 * @param GCPtrMem The address of the guest memory.
9569 */
9570IEM_STATIC VBOXSTRICTRC
9571iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9572{
9573 /*
9574 * The SIDT and SGDT instructions actually store the data using two
9575 * independent writes. The instructions do not respond to operand-size prefixes.
9576 */
9577 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9578 if (rcStrict == VINF_SUCCESS)
9579 {
9580 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9581 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9582 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9583 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9584 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9585 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9586 else
9587 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9588 }
9589 return rcStrict;
9590}
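/*
 * Illustrative sketch only (disabled, not used by IEM; the helper name and the
 * plain byte buffer are assumptions made for illustration): the memory image
 * produced by the function above for a 32-bit guest -- the 16-bit limit is
 * written first, then the 32-bit base at offset 2, as two independent stores.
 * (In 16-bit mode on 286-class target CPUs the top base byte is forced to 0xff
 * instead, see above.)
 */
#if 0
static void sketchSgdtImage32(uint8_t *pbDst, uint16_t cbLimit, uint32_t uBase)
{
    /* First store: the limit word at offset 0. */
    pbDst[0] = (uint8_t)( cbLimit        & 0xff);
    pbDst[1] = (uint8_t)((cbLimit >>  8) & 0xff);
    /* Second store: the base dword at offset 2. */
    pbDst[2] = (uint8_t)( uBase          & 0xff);
    pbDst[3] = (uint8_t)((uBase   >>  8) & 0xff);
    pbDst[4] = (uint8_t)((uBase   >> 16) & 0xff);
    pbDst[5] = (uint8_t)((uBase   >> 24) & 0xff);
}
#endif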
9591
9592
9593/**
9594 * Pushes a word onto the stack.
9595 *
9596 * @returns Strict VBox status code.
9597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9598 * @param u16Value The value to push.
9599 */
9600IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9601{
9602 /* Decrement the stack pointer. */
9603 uint64_t uNewRsp;
9604 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9605 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9606
9607 /* Write the word the lazy way. */
9608 uint16_t *pu16Dst;
9609 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9610 if (rc == VINF_SUCCESS)
9611 {
9612 *pu16Dst = u16Value;
9613 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9614 }
9615
9616 /* Commit the new RSP value unless an access handler made trouble. */
9617 if (rc == VINF_SUCCESS)
9618 pCtx->rsp = uNewRsp;
9619
9620 return rc;
9621}
9622
9623
9624/**
9625 * Pushes a dword onto the stack.
9626 *
9627 * @returns Strict VBox status code.
9628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9629 * @param u32Value The value to push.
9630 */
9631IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9632{
9633 /* Decrement the stack pointer. */
9634 uint64_t uNewRsp;
9635 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9636 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9637
9638 /* Write the dword the lazy way. */
9639 uint32_t *pu32Dst;
9640 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9641 if (rc == VINF_SUCCESS)
9642 {
9643 *pu32Dst = u32Value;
9644 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9645 }
9646
9647 /* Commit the new RSP value unless an access handler made trouble. */
9648 if (rc == VINF_SUCCESS)
9649 pCtx->rsp = uNewRsp;
9650
9651 return rc;
9652}
9653
9654
9655/**
9656 * Pushes a dword segment register value onto the stack.
9657 *
9658 * @returns Strict VBox status code.
9659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9660 * @param u32Value The value to push.
9661 */
9662IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9663{
9664 /* Decrement the stack pointer. */
9665 uint64_t uNewRsp;
9666 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9667 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9668
9669 VBOXSTRICTRC rc;
9670 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9671 {
9672 /* The recompiler writes a full dword. */
9673 uint32_t *pu32Dst;
9674 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9675 if (rc == VINF_SUCCESS)
9676 {
9677 *pu32Dst = u32Value;
9678 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9679 }
9680 }
9681 else
9682 {
9683 /* The Intel docs talk about zero-extending the selector register
9684 value. My actual Intel CPU here might be zero-extending the value,
9685 but it still only writes the lower word... */
9686 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9687 * happens when crossing a page boundary: is the high word checked
9688 * for write accessibility or not? Probably it is. What about segment limits?
9689 * It appears this behavior is also shared with trap error codes.
9690 *
9691 * Docs indicate the behavior changed maybe in the Pentium or Pentium Pro. Check
9692 * on ancient hardware when it actually changed. */
9693 uint16_t *pu16Dst;
9694 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9695 if (rc == VINF_SUCCESS)
9696 {
9697 *pu16Dst = (uint16_t)u32Value;
9698 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9699 }
9700 }
9701
9702 /* Commit the new RSP value unless an access handler made trouble. */
9703 if (rc == VINF_SUCCESS)
9704 pCtx->rsp = uNewRsp;
9705
9706 return rc;
9707}
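/*
 * Illustrative sketch only (disabled, not used by IEM; the byte-array stack
 * model and helper name are assumptions made for illustration): the observed
 * behaviour described above -- a 32-bit operand-size push of a segment
 * register moves the stack pointer down by a full dword, but only the low word
 * of the new stack slot is actually written; the upper word is left untouched.
 */
#if 0
static uint32_t sketchPushSRegObserved(uint8_t *pbStack, uint32_t uEsp, uint16_t uSel)
{
    uEsp -= 4;                                   /* reserve a full dword of stack space... */
    pbStack[uEsp + 0] = (uint8_t)(uSel & 0xff);  /* ...but store only the selector word */
    pbStack[uEsp + 1] = (uint8_t)(uSel >> 8);
    /* pbStack[uEsp + 2] and pbStack[uEsp + 3] keep whatever was there before. */
    return uEsp;
}
#endif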
9708
9709
9710/**
9711 * Pushes a qword onto the stack.
9712 *
9713 * @returns Strict VBox status code.
9714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9715 * @param u64Value The value to push.
9716 */
9717IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9718{
9719 /* Decrement the stack pointer. */
9720 uint64_t uNewRsp;
9721 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9722 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9723
9724 /* Write the qword the lazy way. */
9725 uint64_t *pu64Dst;
9726 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9727 if (rc == VINF_SUCCESS)
9728 {
9729 *pu64Dst = u64Value;
9730 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9731 }
9732
9733 /* Commit the new RSP value unless an access handler made trouble. */
9734 if (rc == VINF_SUCCESS)
9735 pCtx->rsp = uNewRsp;
9736
9737 return rc;
9738}
9739
9740
9741/**
9742 * Pops a word from the stack.
9743 *
9744 * @returns Strict VBox status code.
9745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9746 * @param pu16Value Where to store the popped value.
9747 */
9748IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9749{
9750 /* Increment the stack pointer. */
9751 uint64_t uNewRsp;
9752 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9753 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9754
9755 /* Read the word the lazy way. */
9756 uint16_t const *pu16Src;
9757 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9758 if (rc == VINF_SUCCESS)
9759 {
9760 *pu16Value = *pu16Src;
9761 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9762
9763 /* Commit the new RSP value. */
9764 if (rc == VINF_SUCCESS)
9765 pCtx->rsp = uNewRsp;
9766 }
9767
9768 return rc;
9769}
9770
9771
9772/**
9773 * Pops a dword from the stack.
9774 *
9775 * @returns Strict VBox status code.
9776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9777 * @param pu32Value Where to store the popped value.
9778 */
9779IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9780{
9781 /* Increment the stack pointer. */
9782 uint64_t uNewRsp;
9783 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9784 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9785
9786 /* Read the dword the lazy way. */
9787 uint32_t const *pu32Src;
9788 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9789 if (rc == VINF_SUCCESS)
9790 {
9791 *pu32Value = *pu32Src;
9792 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9793
9794 /* Commit the new RSP value. */
9795 if (rc == VINF_SUCCESS)
9796 pCtx->rsp = uNewRsp;
9797 }
9798
9799 return rc;
9800}
9801
9802
9803/**
9804 * Pops a qword from the stack.
9805 *
9806 * @returns Strict VBox status code.
9807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9808 * @param pu64Value Where to store the popped value.
9809 */
9810IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9811{
9812 /* Increment the stack pointer. */
9813 uint64_t uNewRsp;
9814 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9815 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9816
9817 /* Read the qword the lazy way. */
9818 uint64_t const *pu64Src;
9819 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9820 if (rc == VINF_SUCCESS)
9821 {
9822 *pu64Value = *pu64Src;
9823 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9824
9825 /* Commit the new RSP value. */
9826 if (rc == VINF_SUCCESS)
9827 pCtx->rsp = uNewRsp;
9828 }
9829
9830 return rc;
9831}
9832
9833
9834/**
9835 * Pushes a word onto the stack, using a temporary stack pointer.
9836 *
9837 * @returns Strict VBox status code.
9838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9839 * @param u16Value The value to push.
9840 * @param pTmpRsp Pointer to the temporary stack pointer.
9841 */
9842IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9843{
9844 /* Decrement the stack pointer. */
9845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9846 RTUINT64U NewRsp = *pTmpRsp;
9847 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9848
9849 /* Write the word the lazy way. */
9850 uint16_t *pu16Dst;
9851 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9852 if (rc == VINF_SUCCESS)
9853 {
9854 *pu16Dst = u16Value;
9855 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9856 }
9857
9858 /* Commit the new RSP value unless an access handler made trouble. */
9859 if (rc == VINF_SUCCESS)
9860 *pTmpRsp = NewRsp;
9861
9862 return rc;
9863}
9864
9865
9866/**
9867 * Pushes a dword onto the stack, using a temporary stack pointer.
9868 *
9869 * @returns Strict VBox status code.
9870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9871 * @param u32Value The value to push.
9872 * @param pTmpRsp Pointer to the temporary stack pointer.
9873 */
9874IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9875{
9876 /* Decrement the stack pointer. */
9877 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9878 RTUINT64U NewRsp = *pTmpRsp;
9879 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9880
9881 /* Write the dword the lazy way. */
9882 uint32_t *pu32Dst;
9883 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9884 if (rc == VINF_SUCCESS)
9885 {
9886 *pu32Dst = u32Value;
9887 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9888 }
9889
9890 /* Commit the new RSP value unless an access handler made trouble. */
9891 if (rc == VINF_SUCCESS)
9892 *pTmpRsp = NewRsp;
9893
9894 return rc;
9895}
9896
9897
9898/**
9899 * Pushes a qword onto the stack, using a temporary stack pointer.
9900 *
9901 * @returns Strict VBox status code.
9902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9903 * @param u64Value The value to push.
9904 * @param pTmpRsp Pointer to the temporary stack pointer.
9905 */
9906IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9907{
9908 /* Decrement the stack pointer. */
9909 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9910 RTUINT64U NewRsp = *pTmpRsp;
9911 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9912
9913 /* Write the qword the lazy way. */
9914 uint64_t *pu64Dst;
9915 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9916 if (rc == VINF_SUCCESS)
9917 {
9918 *pu64Dst = u64Value;
9919 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9920 }
9921
9922 /* Commit the new RSP value unless an access handler made trouble. */
9923 if (rc == VINF_SUCCESS)
9924 *pTmpRsp = NewRsp;
9925
9926 return rc;
9927}
9928
9929
9930/**
9931 * Pops a word from the stack, using a temporary stack pointer.
9932 *
9933 * @returns Strict VBox status code.
9934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9935 * @param pu16Value Where to store the popped value.
9936 * @param pTmpRsp Pointer to the temporary stack pointer.
9937 */
9938IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9939{
9940 /* Increment the stack pointer. */
9941 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9942 RTUINT64U NewRsp = *pTmpRsp;
9943 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9944
9945 /* Read the word the lazy way. */
9946 uint16_t const *pu16Src;
9947 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9948 if (rc == VINF_SUCCESS)
9949 {
9950 *pu16Value = *pu16Src;
9951 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9952
9953 /* Commit the new RSP value. */
9954 if (rc == VINF_SUCCESS)
9955 *pTmpRsp = NewRsp;
9956 }
9957
9958 return rc;
9959}
9960
9961
9962/**
9963 * Pops a dword from the stack, using a temporary stack pointer.
9964 *
9965 * @returns Strict VBox status code.
9966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9967 * @param pu32Value Where to store the popped value.
9968 * @param pTmpRsp Pointer to the temporary stack pointer.
9969 */
9970IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9971{
9972 /* Increment the stack pointer. */
9973 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9974 RTUINT64U NewRsp = *pTmpRsp;
9975 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9976
9977 /* Read the dword the lazy way. */
9978 uint32_t const *pu32Src;
9979 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9980 if (rc == VINF_SUCCESS)
9981 {
9982 *pu32Value = *pu32Src;
9983 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9984
9985 /* Commit the new RSP value. */
9986 if (rc == VINF_SUCCESS)
9987 *pTmpRsp = NewRsp;
9988 }
9989
9990 return rc;
9991}
9992
9993
9994/**
9995 * Pops a qword from the stack, using a temporary stack pointer.
9996 *
9997 * @returns Strict VBox status code.
9998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9999 * @param pu64Value Where to store the popped value.
10000 * @param pTmpRsp Pointer to the temporary stack pointer.
10001 */
10002IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10003{
10004 /* Increment the stack pointer. */
10005 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10006 RTUINT64U NewRsp = *pTmpRsp;
10007 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10008
10009 /* Read the qword the lazy way. */
10010 uint64_t const *pu64Src;
10011 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10012 if (rcStrict == VINF_SUCCESS)
10013 {
10014 *pu64Value = *pu64Src;
10015 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10016
10017 /* Commit the new RSP value. */
10018 if (rcStrict == VINF_SUCCESS)
10019 *pTmpRsp = NewRsp;
10020 }
10021
10022 return rcStrict;
10023}
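/*
 * Illustrative sketch only (disabled; the helper name and pushed values are
 * assumptions made for illustration): the intended use of the *Ex push/pop
 * variants above -- stage several stack operations against a temporary RSP
 * and commit it to the CPU context only once everything has succeeded.
 */
#if 0
static VBOXSTRICTRC sketchPushTwoWordsEx(PVMCPU pVCpu, uint16_t uFirstValue, uint16_t uSecondValue)
{
    PCPUMCTX  pCtx = IEM_GET_CTX(pVCpu);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, uFirstValue, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16Ex(pVCpu, uSecondValue, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u;   /* Commit the new RSP only after both pushes succeeded. */
    return rcStrict;
}
#endif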
10024
10025
10026/**
10027 * Begin a special stack push (used by interrupts, exceptions and such).
10028 *
10029 * This will raise \#SS or \#PF if appropriate.
10030 *
10031 * @returns Strict VBox status code.
10032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10033 * @param cbMem The number of bytes to push onto the stack.
10034 * @param ppvMem Where to return the pointer to the stack memory.
10035 * As with the other memory functions, this could be
10036 * direct access or bounce-buffered access, so
10037 * don't commit registers until the commit call
10038 * succeeds.
10039 * @param puNewRsp Where to return the new RSP value. This must be
10040 * passed unchanged to
10041 * iemMemStackPushCommitSpecial().
10042 */
10043IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10044{
10045 Assert(cbMem < UINT8_MAX);
10046 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10047 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10048 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10049}
10050
10051
10052/**
10053 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10054 *
10055 * This will update the rSP.
10056 *
10057 * @returns Strict VBox status code.
10058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10059 * @param pvMem The pointer returned by
10060 * iemMemStackPushBeginSpecial().
10061 * @param uNewRsp The new RSP value returned by
10062 * iemMemStackPushBeginSpecial().
10063 */
10064IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10065{
10066 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10067 if (rcStrict == VINF_SUCCESS)
10068 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10069 return rcStrict;
10070}
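/*
 * Illustrative sketch only (disabled; the frame layout, helper name and values
 * are assumptions made for illustration): how the Begin/Commit pair above is
 * meant to be used -- map the whole stack frame, fill it in via the returned
 * pointer, then commit, which unmaps the memory and finally updates RSP.
 */
#if 0
static VBOXSTRICTRC sketchPushTwoDwordsSpecial(PVMCPU pVCpu, uint32_t uValue0, uint32_t uValue1)
{
    uint64_t  uNewRsp;
    uint32_t *pau32Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau32Frame[0] = uValue0;    /* lowest address, i.e. the value conceptually pushed last */
    pau32Frame[1] = uValue1;
    return iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
}
#endif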
10071
10072
10073/**
10074 * Begin a special stack pop (used by iret, retf and such).
10075 *
10076 * This will raise \#SS or \#PF if appropriate.
10077 *
10078 * @returns Strict VBox status code.
10079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10080 * @param cbMem The number of bytes to pop from the stack.
10081 * @param ppvMem Where to return the pointer to the stack memory.
10082 * @param puNewRsp Where to return the new RSP value. This must be
10083 * assigned to CPUMCTX::rsp manually some time
10084 * after iemMemStackPopDoneSpecial() has been
10085 * called.
10086 */
10087IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10088{
10089 Assert(cbMem < UINT8_MAX);
10090 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10091 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10092 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10093}
10094
10095
10096/**
10097 * Continue a special stack pop (used by iret and retf).
10098 *
10099 * This will raise \#SS or \#PF if appropriate.
10100 *
10101 * @returns Strict VBox status code.
10102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10103 * @param cbMem The number of bytes to pop from the stack.
10104 * @param ppvMem Where to return the pointer to the stack memory.
10105 * @param puNewRsp Where to return the new RSP value. This must be
10106 * assigned to CPUMCTX::rsp manually some time
10107 * after iemMemStackPopDoneSpecial() has been
10108 * called.
10109 */
10110IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10111{
10112 Assert(cbMem < UINT8_MAX);
10113 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10114 RTUINT64U NewRsp;
10115 NewRsp.u = *puNewRsp;
10116 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10117 *puNewRsp = NewRsp.u;
10118 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10119}
10120
10121
10122/**
10123 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10124 * iemMemStackPopContinueSpecial).
10125 *
10126 * The caller will manually commit the rSP.
10127 *
10128 * @returns Strict VBox status code.
10129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10130 * @param pvMem The pointer returned by
10131 * iemMemStackPopBeginSpecial() or
10132 * iemMemStackPopContinueSpecial().
10133 */
10134IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10135{
10136 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10137}
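/*
 * Illustrative sketch only (disabled; the frame layout and helper name are
 * assumptions made for illustration): how the special pop helpers above fit
 * together -- map the frame, copy out what is needed, unmap via the Done
 * function, and only then commit the new RSP manually as documented.
 */
#if 0
static VBOXSTRICTRC sketchPopTwoDwordsSpecial(PVMCPU pVCpu, uint32_t *puValue0, uint32_t *puValue1)
{
    uint64_t        uNewRsp;
    uint32_t const *pau32Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puValue0 = pau32Frame[0];
    *puValue1 = pau32Frame[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  /* The caller commits RSP, see the notes above. */
    return rcStrict;
}
#endif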
10138
10139
10140/**
10141 * Fetches a system table byte.
10142 *
10143 * @returns Strict VBox status code.
10144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10145 * @param pbDst Where to return the byte.
10146 * @param iSegReg The index of the segment register to use for
10147 * this access. The base and limits are checked.
10148 * @param GCPtrMem The address of the guest memory.
10149 */
10150IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10151{
10152 /* The lazy approach for now... */
10153 uint8_t const *pbSrc;
10154 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10155 if (rc == VINF_SUCCESS)
10156 {
10157 *pbDst = *pbSrc;
10158 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10159 }
10160 return rc;
10161}
10162
10163
10164/**
10165 * Fetches a system table word.
10166 *
10167 * @returns Strict VBox status code.
10168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10169 * @param pu16Dst Where to return the word.
10170 * @param iSegReg The index of the segment register to use for
10171 * this access. The base and limits are checked.
10172 * @param GCPtrMem The address of the guest memory.
10173 */
10174IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10175{
10176 /* The lazy approach for now... */
10177 uint16_t const *pu16Src;
10178 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10179 if (rc == VINF_SUCCESS)
10180 {
10181 *pu16Dst = *pu16Src;
10182 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10183 }
10184 return rc;
10185}
10186
10187
10188/**
10189 * Fetches a system table dword.
10190 *
10191 * @returns Strict VBox status code.
10192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10193 * @param pu32Dst Where to return the dword.
10194 * @param iSegReg The index of the segment register to use for
10195 * this access. The base and limits are checked.
10196 * @param GCPtrMem The address of the guest memory.
10197 */
10198IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10199{
10200 /* The lazy approach for now... */
10201 uint32_t const *pu32Src;
10202 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10203 if (rc == VINF_SUCCESS)
10204 {
10205 *pu32Dst = *pu32Src;
10206 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10207 }
10208 return rc;
10209}
10210
10211
10212/**
10213 * Fetches a system table qword.
10214 *
10215 * @returns Strict VBox status code.
10216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10217 * @param pu64Dst Where to return the qword.
10218 * @param iSegReg The index of the segment register to use for
10219 * this access. The base and limits are checked.
10220 * @param GCPtrMem The address of the guest memory.
10221 */
10222IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10223{
10224 /* The lazy approach for now... */
10225 uint64_t const *pu64Src;
10226 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10227 if (rc == VINF_SUCCESS)
10228 {
10229 *pu64Dst = *pu64Src;
10230 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10231 }
10232 return rc;
10233}
10234
10235
10236/**
10237 * Fetches a descriptor table entry with caller specified error code.
10238 *
10239 * @returns Strict VBox status code.
10240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10241 * @param pDesc Where to return the descriptor table entry.
10242 * @param uSel The selector which table entry to fetch.
10243 * @param uXcpt The exception to raise on table lookup error.
10244 * @param uErrorCode The error code associated with the exception.
10245 */
10246IEM_STATIC VBOXSTRICTRC
10247iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10248{
10249 AssertPtr(pDesc);
10250 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10251
10252 /** @todo did the 286 require all 8 bytes to be accessible? */
10253 /*
10254 * Get the selector table base and check bounds.
10255 */
10256 RTGCPTR GCPtrBase;
10257 if (uSel & X86_SEL_LDT)
10258 {
10259 if ( !pCtx->ldtr.Attr.n.u1Present
10260 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10261 {
10262 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10263 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10264 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10265 uErrorCode, 0);
10266 }
10267
10268 Assert(pCtx->ldtr.Attr.n.u1Present);
10269 GCPtrBase = pCtx->ldtr.u64Base;
10270 }
10271 else
10272 {
10273 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10274 {
10275 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10276 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10277 uErrorCode, 0);
10278 }
10279 GCPtrBase = pCtx->gdtr.pGdt;
10280 }
10281
10282 /*
10283 * Read the legacy descriptor and maybe the long mode extensions if
10284 * required.
10285 */
10286 VBOXSTRICTRC rcStrict;
10287 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10288 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10289 else
10290 {
10291 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10292 if (rcStrict == VINF_SUCCESS)
10293 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10294 if (rcStrict == VINF_SUCCESS)
10295 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10296 if (rcStrict == VINF_SUCCESS)
10297 pDesc->Legacy.au16[3] = 0;
10298 else
10299 return rcStrict;
10300 }
10301
10302 if (rcStrict == VINF_SUCCESS)
10303 {
10304 if ( !IEM_IS_LONG_MODE(pVCpu)
10305 || pDesc->Legacy.Gen.u1DescType)
10306 pDesc->Long.au64[1] = 0;
10307 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10308 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10309 else
10310 {
10311 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10312 /** @todo is this the right exception? */
10313 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10314 }
10315 }
10316 return rcStrict;
10317}
10318
10319
10320/**
10321 * Fetches a descriptor table entry.
10322 *
10323 * @returns Strict VBox status code.
10324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10325 * @param pDesc Where to return the descriptor table entry.
10326 * @param uSel The selector which table entry to fetch.
10327 * @param uXcpt The exception to raise on table lookup error.
10328 */
10329IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10330{
10331 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10332}
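/*
 * Illustrative sketch only (disabled, not used by IEM; the helper name is an
 * assumption made for illustration): how a selector value maps to its
 * descriptor table entry in the lookups above.  X86_SEL_LDT is the
 * table-indicator bit and X86_SEL_MASK strips the TI and RPL bits (the low
 * three), leaving index * 8 as the byte offset into the table.
 */
#if 0
static uint64_t sketchSelDescAddr(uint16_t uSel, uint64_t uGdtBase, uint64_t uLdtBase)
{
    uint64_t const uTableBase = (uSel & X86_SEL_LDT) ? uLdtBase : uGdtBase;
    return uTableBase + (uSel & X86_SEL_MASK);  /* e.g. selector 0x001b -> GDT offset 0x18 */
}
#endif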
10333
10334
10335/**
10336 * Fakes a long mode stack selector for SS = 0.
10337 *
10338 * @param pDescSs Where to return the fake stack descriptor.
10339 * @param uDpl The DPL we want.
10340 */
10341IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10342{
10343 pDescSs->Long.au64[0] = 0;
10344 pDescSs->Long.au64[1] = 0;
10345 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10346 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10347 pDescSs->Long.Gen.u2Dpl = uDpl;
10348 pDescSs->Long.Gen.u1Present = 1;
10349 pDescSs->Long.Gen.u1Long = 1;
10350}
10351
10352
10353/**
10354 * Marks the selector descriptor as accessed (only non-system descriptors).
10355 *
10356 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10357 * will therefore skip the limit checks.
10358 *
10359 * @returns Strict VBox status code.
10360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10361 * @param uSel The selector.
10362 */
10363IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10364{
10365 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10366
10367 /*
10368 * Get the selector table base and calculate the entry address.
10369 */
10370 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10371 ? pCtx->ldtr.u64Base
10372 : pCtx->gdtr.pGdt;
10373 GCPtr += uSel & X86_SEL_MASK;
10374
10375 /*
10376 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10377 * ugly stuff to avoid this. This will make sure it's an atomic access
10378 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10379 */
10380 VBOXSTRICTRC rcStrict;
10381 uint32_t volatile *pu32;
10382 if ((GCPtr & 3) == 0)
10383 {
10384 /* The normal case, map the 32-bit bits around the accessed bit (40). */
10385 GCPtr += 2 + 2;
10386 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10387 if (rcStrict != VINF_SUCCESS)
10388 return rcStrict;
10389 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10390 }
10391 else
10392 {
10393 /* The misaligned GDT/LDT case, map the whole thing. */
10394 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10395 if (rcStrict != VINF_SUCCESS)
10396 return rcStrict;
10397 switch ((uintptr_t)pu32 & 3)
10398 {
10399 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10400 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10401 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10402 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10403 }
10404 }
10405
10406 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10407}
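/*
 * Illustrative sketch only (disabled, not used by IEM; the helper name is an
 * assumption made for illustration): the bit the function above sets.  The
 * accessed bit is bit 40 of the 8-byte descriptor, i.e. bit 0 of the type byte
 * at offset 5, which is bit 8 of the dword at offset 4 -- matching the aligned
 * path's ASMAtomicBitSet(pu32, 8).
 */
#if 0
static void sketchSetAccessedBit(uint8_t *pabDesc /* 8-byte descriptor image */)
{
    pabDesc[5] |= 0x01;     /* X86_SEL_TYPE_ACCESSED in the type field */
}
#endif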
10408
10409/** @} */
10410
10411
10412/*
10413 * Include the C/C++ implementation of instruction.
10414 */
10415#include "IEMAllCImpl.cpp.h"
10416
10417
10418
10419/** @name "Microcode" macros.
10420 *
10421 * The idea is that we should be able to use the same code to interpret
10422 * instructions as well as to recompile them. Thus this obfuscation (see the sketch below).
10423 *
10424 * @{
10425 */
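/*
 * Illustrative sketch only (disabled; not a real opcode worker -- the function
 * name is an assumption made for illustration and the real workers live in the
 * included instruction files with their own conventions): how the IEM_MC_*
 * macros defined below compose into a straight-line instruction body, here
 * roughly a "mov ecx, eax".
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemOpSketch_MovEcxEax(PVMCPU pVCpu)
{
    IEM_MC_BEGIN(0, 1);                             /* no arguments, one local */
    IEM_MC_LOCAL(uint32_t, u32Value);
    IEM_MC_FETCH_GREG_U32(u32Value, X86_GREG_xAX);  /* read EAX */
    IEM_MC_STORE_GREG_U32(X86_GREG_xCX, u32Value);  /* write ECX, clearing RCX[63:32] */
    IEM_MC_ADVANCE_RIP();                           /* step past the instruction */
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif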
10426#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10427#define IEM_MC_END() }
10428#define IEM_MC_PAUSE() do {} while (0)
10429#define IEM_MC_CONTINUE() do {} while (0)
10430
10431/** Internal macro. */
10432#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10433 do \
10434 { \
10435 VBOXSTRICTRC rcStrict2 = a_Expr; \
10436 if (rcStrict2 != VINF_SUCCESS) \
10437 return rcStrict2; \
10438 } while (0)
10439
10440
10441#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10442#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10443#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10444#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10445#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10446#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10447#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10448#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10449#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10450 do { \
10451 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10452 return iemRaiseDeviceNotAvailable(pVCpu); \
10453 } while (0)
10454#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10455 do { \
10456 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10457 return iemRaiseDeviceNotAvailable(pVCpu); \
10458 } while (0)
10459#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10460 do { \
10461 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10462 return iemRaiseMathFault(pVCpu); \
10463 } while (0)
10464#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10465 do { \
10466 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10467 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10468 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10469 return iemRaiseUndefinedOpcode(pVCpu); \
10470 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10471 return iemRaiseDeviceNotAvailable(pVCpu); \
10472 } while (0)
10473#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10474 do { \
10475 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10476 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10477 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10478 return iemRaiseUndefinedOpcode(pVCpu); \
10479 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10480 return iemRaiseDeviceNotAvailable(pVCpu); \
10481 } while (0)
10482#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10483 do { \
10484 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10485 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10486 return iemRaiseUndefinedOpcode(pVCpu); \
10487 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10488 return iemRaiseDeviceNotAvailable(pVCpu); \
10489 } while (0)
10490#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10491 do { \
10492 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10493 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10494 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10495 return iemRaiseUndefinedOpcode(pVCpu); \
10496 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10497 return iemRaiseDeviceNotAvailable(pVCpu); \
10498 } while (0)
10499#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10500 do { \
10501 if (pVCpu->iem.s.uCpl != 0) \
10502 return iemRaiseGeneralProtectionFault0(pVCpu); \
10503 } while (0)
10504#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10505 do { \
10506 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10507 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10508 } while (0)
10509
10510
10511#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10512#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10513#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10514#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10515#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10516#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10517#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10518 uint32_t a_Name; \
10519 uint32_t *a_pName = &a_Name
10520#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10521 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10522
10523#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10524#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10525
10526#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10527#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10528#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10529#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10530#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10531#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10532#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10533#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10534#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10535#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10536#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10537#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10538#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10539#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10540#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10541#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10542#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10543#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10544#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10545#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10546#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10547#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10548#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10549#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10550#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10551#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10552#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10553#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10554#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10555/** @note Not for IOPL or IF testing or modification. */
10556#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10557#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10558#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10559#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10560
10561#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10562#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10563#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10564#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10565#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10566#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10567#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10568#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10569#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10570#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10571#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10572 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10573
10574#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10575#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10576/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10577 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10578#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10579#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10580/** @note Not for IOPL or IF testing or modification. */
10581#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10582
10583#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10584#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10585#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10586 do { \
10587 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10588 *pu32Reg += (a_u32Value); \
10589 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10590 } while (0)
10591#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10592
10593#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10594#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10595#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10596 do { \
10597 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10598 *pu32Reg -= (a_u32Value); \
10599 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10600 } while (0)
10601#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10602#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10603
10604#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10605#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10606#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10607#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10608#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10609#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10610#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10611
10612#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10613#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10614#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10615#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10616
10617#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10618#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10619#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10620
10621#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10622#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10623#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10624
10625#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10626#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10627#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10628
10629#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10630#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10631#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10632
10633#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10634
10635#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10636
10637#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10638#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10639#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10640 do { \
10641 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10642 *pu32Reg &= (a_u32Value); \
10643 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10644 } while (0)
10645#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10646
10647#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10648#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10649#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10650 do { \
10651 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10652 *pu32Reg |= (a_u32Value); \
10653 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10654 } while (0)
10655#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10656
10657
10658/** @note Not for IOPL or IF modification. */
10659#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10660/** @note Not for IOPL or IF modification. */
10661#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10662/** @note Not for IOPL or IF modification. */
10663#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10664
10665#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10666
10667
10668#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10669 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10670#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10671 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10672#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10673 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10674#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10675 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10676#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10677 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10678#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10679 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10680#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10681 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10682
10683#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10684 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10685#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10686 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10687#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10688 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10689#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10690 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10691#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10692 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10693#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10694 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10695 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10696 } while (0)
10697#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10698 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10699 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10700 } while (0)
10701#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10702 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10703#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10704 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10705#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10706 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10707#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10708 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10709 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10710
10711#ifndef IEM_WITH_SETJMP
10712# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10714# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10716# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10718#else
10719# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10720 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10721# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10722 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10723# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10724 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10725#endif
10726
10727#ifndef IEM_WITH_SETJMP
10728# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10730# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10732# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10734#else
10735# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10736 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10737# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10738 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10739# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10740 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10741#endif
10742
10743#ifndef IEM_WITH_SETJMP
10744# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10746# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10748# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10749 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10750#else
10751# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10752 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10753# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10754 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10755# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10756 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10757#endif
10758
10759#ifdef SOME_UNUSED_FUNCTION
10760# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10762#endif
10763
10764#ifndef IEM_WITH_SETJMP
10765# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10767# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10769# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10771# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10773#else
10774# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10775 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10776# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10777 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10778# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10779 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10780# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10781 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10782#endif
10783
10784#ifndef IEM_WITH_SETJMP
10785# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10787# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10789# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10790 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10791#else
10792# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10793 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10794# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10795 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10796# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10797 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10798#endif
10799
10800#ifndef IEM_WITH_SETJMP
10801# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10802 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10803# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10804 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10805#else
10806# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10807 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10808# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10809 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10810#endif
10811
10812
10813
10814#ifndef IEM_WITH_SETJMP
10815# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10816 do { \
10817 uint8_t u8Tmp; \
10818 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10819 (a_u16Dst) = u8Tmp; \
10820 } while (0)
10821# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10822 do { \
10823 uint8_t u8Tmp; \
10824 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10825 (a_u32Dst) = u8Tmp; \
10826 } while (0)
10827# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10828 do { \
10829 uint8_t u8Tmp; \
10830 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10831 (a_u64Dst) = u8Tmp; \
10832 } while (0)
10833# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10834 do { \
10835 uint16_t u16Tmp; \
10836 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10837 (a_u32Dst) = u16Tmp; \
10838 } while (0)
10839# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10840 do { \
10841 uint16_t u16Tmp; \
10842 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10843 (a_u64Dst) = u16Tmp; \
10844 } while (0)
10845# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10846 do { \
10847 uint32_t u32Tmp; \
10848 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10849 (a_u64Dst) = u32Tmp; \
10850 } while (0)
10851#else /* IEM_WITH_SETJMP */
10852# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10853 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10854# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10855 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10856# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10857 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10858# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10859 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10860# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10861 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10862# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10863 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10864#endif /* IEM_WITH_SETJMP */
10865
10866#ifndef IEM_WITH_SETJMP
10867# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10868 do { \
10869 uint8_t u8Tmp; \
10870 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10871 (a_u16Dst) = (int8_t)u8Tmp; \
10872 } while (0)
10873# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10874 do { \
10875 uint8_t u8Tmp; \
10876 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10877 (a_u32Dst) = (int8_t)u8Tmp; \
10878 } while (0)
10879# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10880 do { \
10881 uint8_t u8Tmp; \
10882 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10883 (a_u64Dst) = (int8_t)u8Tmp; \
10884 } while (0)
10885# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10886 do { \
10887 uint16_t u16Tmp; \
10888 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10889 (a_u32Dst) = (int16_t)u16Tmp; \
10890 } while (0)
10891# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10892 do { \
10893 uint16_t u16Tmp; \
10894 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10895 (a_u64Dst) = (int16_t)u16Tmp; \
10896 } while (0)
10897# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10898 do { \
10899 uint32_t u32Tmp; \
10900 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10901 (a_u64Dst) = (int32_t)u32Tmp; \
10902 } while (0)
10903#else /* IEM_WITH_SETJMP */
10904# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10905 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10906# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10907 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10908# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10909 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10910# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10911 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10912# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10913 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10914# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10915 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10916#endif /* IEM_WITH_SETJMP */
10917
10918#ifndef IEM_WITH_SETJMP
10919# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10920 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10921# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10922 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10923# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10924 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10925# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10926 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10927#else
10928# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10929 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10930# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10931 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10932# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10933 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10934# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10935 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10936#endif
10937
10938#ifndef IEM_WITH_SETJMP
10939# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10940 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10941# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10942 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10943# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10944 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10945# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10946 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10947#else
10948# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10949 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10950# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10951 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10952# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10953 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10954# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10955 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10956#endif
10957
10958#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10959#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10960#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10961#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10962#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10963#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10964#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10965 do { \
10966 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10967 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10968 } while (0)
10969
10970#ifndef IEM_WITH_SETJMP
10971# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10972 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10973# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10974 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10975#else
10976# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10977 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10978# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10979 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10980#endif
10981
10982
10983#define IEM_MC_PUSH_U16(a_u16Value) \
10984 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10985#define IEM_MC_PUSH_U32(a_u32Value) \
10986 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10987#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10989#define IEM_MC_PUSH_U64(a_u64Value) \
10990 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10991
10992#define IEM_MC_POP_U16(a_pu16Value) \
10993 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10994#define IEM_MC_POP_U32(a_pu32Value) \
10995 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10996#define IEM_MC_POP_U64(a_pu64Value) \
10997 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10998
10999/** Maps guest memory for direct or bounce buffered access.
11000 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11001 * @remarks May return.
11002 */
11003#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11004 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11005
11006/** Maps guest memory for direct or bounce buffered access.
11007 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11008 * @remarks May return.
11009 */
11010#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11011 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11012
11013/** Commits the memory and unmaps the guest memory.
11014 * @remarks May return.
11015 */
11016#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11017 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11018
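/* Illustrative sketch (not lifted verbatim from the instruction decoders): a
 * condensed read-modify-write body showing how IEM_MC_MEM_MAP and
 * IEM_MC_MEM_COMMIT_AND_UNMAP are typically paired.  Lock-prefix handling and
 * decoding checks are omitted, so treat the exact sequence as an approximation
 * of the real opcode bodies rather than a drop-in fragment.
 * @code
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint16_t *,  pu16Dst,           0);
 *      IEM_MC_ARG(uint16_t,    u16Src,            1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,   2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */
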
11019/** Commits the memory and unmaps the guest memory unless the FPU status word
11020 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11021 * would cause FLD not to store.
11022 *
11023 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11024 * store, while \#P will not.
11025 *
11026 * @remarks May in theory return - for now.
11027 */
11028#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11029 do { \
11030 if ( !(a_u16FSW & X86_FSW_ES) \
11031 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11032 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11033 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11034 } while (0)
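
/* Worked illustration of the check above (informal): the commit is skipped
 * only when X86_FSW_ES is set and at least one of IE/OE/UE is both pending in
 * the FSW and unmasked in the FCW.  For example, FSW=ES|PE with all exceptions
 * masked still commits (matching the note that #P does not prevent the store),
 * whereas FSW=ES|IE with X86_FCW_IM clear in the FCW skips the commit. */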
11035
11036/** Calculate the effective address from R/M. */
11037#ifndef IEM_WITH_SETJMP
11038# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11039 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11040#else
11041# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11042 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11043#endif
11044
11045#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11046#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11047#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11048#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11049#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11050#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11051#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
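
/* Illustrative sketch (simplified, in the style of the division bodies):
 * IEM_MC_CALL_AIMPL_4 captures the worker's status code so the decoder can
 * branch on it, here raising #DE when the worker reports failure.  The
 * surrounding argument setup is elided.
 * @code
 *      IEM_MC_LOCAL(int32_t, rc);
 *      ...
 *      IEM_MC_CALL_AIMPL_4(rc, iemAImpl_div_u16, pu16AX, pu16DX, u16Value, pEFlags);
 *      IEM_MC_IF_LOCAL_IS_Z(rc) {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ELSE() {
 *          IEM_MC_RAISE_DIVIDE_ERROR();
 *      } IEM_MC_ENDIF();
 * @endcode
 */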
11052
11053/**
11054 * Defers the rest of the instruction emulation to a C implementation routine
11055 * and returns, only taking the standard parameters.
11056 *
11057 * @param a_pfnCImpl The pointer to the C routine.
11058 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11059 */
11060#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11061
11062/**
11063 * Defers the rest of instruction emulation to a C implementation routine and
11064 * returns, taking one argument in addition to the standard ones.
11065 *
11066 * @param a_pfnCImpl The pointer to the C routine.
11067 * @param a0 The argument.
11068 */
11069#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11070
11071/**
11072 * Defers the rest of the instruction emulation to a C implementation routine
11073 * and returns, taking two arguments in addition to the standard ones.
11074 *
11075 * @param a_pfnCImpl The pointer to the C routine.
11076 * @param a0 The first extra argument.
11077 * @param a1 The second extra argument.
11078 */
11079#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11080
11081/**
11082 * Defers the rest of the instruction emulation to a C implementation routine
11083 * and returns, taking three arguments in addition to the standard ones.
11084 *
11085 * @param a_pfnCImpl The pointer to the C routine.
11086 * @param a0 The first extra argument.
11087 * @param a1 The second extra argument.
11088 * @param a2 The third extra argument.
11089 */
11090#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11091
11092/**
11093 * Defers the rest of the instruction emulation to a C implementation routine
11094 * and returns, taking four arguments in addition to the standard ones.
11095 *
11096 * @param a_pfnCImpl The pointer to the C routine.
11097 * @param a0 The first extra argument.
11098 * @param a1 The second extra argument.
11099 * @param a2 The third extra argument.
11100 * @param a3 The fourth extra argument.
11101 */
11102#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11103
11104/**
11105 * Defers the rest of the instruction emulation to a C implementation routine
11106 * and returns, taking five arguments in addition to the standard ones.
11107 *
11108 * @param a_pfnCImpl The pointer to the C routine.
11109 * @param a0 The first extra argument.
11110 * @param a1 The second extra argument.
11111 * @param a2 The third extra argument.
11112 * @param a3 The fourth extra argument.
11113 * @param a4 The fifth extra argument.
11114 */
11115#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
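
/* Illustrative sketch (simplified): a decoder body handing the heavy lifting
 * to a C implementation routine, roughly mirroring the memory form of LLDT
 * with the decoding checks elided.
 * @code
 *      IEM_MC_BEGIN(1, 1);
 *      IEM_MC_ARG(uint16_t, u16Sel, 0);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_FETCH_MEM_U16(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
 *      IEM_MC_END();
 * @endcode
 */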
11116
11117/**
11118 * Defers the entire instruction emulation to a C implementation routine and
11119 * returns, only taking the standard parameters.
11120 *
11121 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11122 *
11123 * @param a_pfnCImpl The pointer to the C routine.
11124 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11125 */
11126#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11127
11128/**
11129 * Defers the entire instruction emulation to a C implementation routine and
11130 * returns, taking one argument in addition to the standard ones.
11131 *
11132 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11133 *
11134 * @param a_pfnCImpl The pointer to the C routine.
11135 * @param a0 The argument.
11136 */
11137#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11138
11139/**
11140 * Defers the entire instruction emulation to a C implementation routine and
11141 * returns, taking two arguments in addition to the standard ones.
11142 *
11143 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11144 *
11145 * @param a_pfnCImpl The pointer to the C routine.
11146 * @param a0 The first extra argument.
11147 * @param a1 The second extra argument.
11148 */
11149#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11150
11151/**
11152 * Defers the entire instruction emulation to a C implementation routine and
11153 * returns, taking three arguments in addition to the standard ones.
11154 *
11155 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11156 *
11157 * @param a_pfnCImpl The pointer to the C routine.
11158 * @param a0 The first extra argument.
11159 * @param a1 The second extra argument.
11160 * @param a2 The third extra argument.
11161 */
11162#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
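
/* Illustrative sketch: instructions with no operand decoding of their own are
 * deferred wholesale, without an IEM_MC_BEGIN/IEM_MC_END block, roughly the
 * way CPUID is dispatched.
 * @code
 *      FNIEMOP_DEF(iemOp_cpuid)
 *      {
 *          IEMOP_MNEMONIC(cpuid, "cpuid");
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
 *      }
 * @endcode
 */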
11163
11164/**
11165 * Calls a FPU assembly implementation taking one visible argument.
11166 *
11167 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11168 * @param a0 The first extra argument.
11169 */
11170#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11171 do { \
11172 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11173 } while (0)
11174
11175/**
11176 * Calls a FPU assembly implementation taking two visible arguments.
11177 *
11178 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11179 * @param a0 The first extra argument.
11180 * @param a1 The second extra argument.
11181 */
11182#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11183 do { \
11184 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11185 } while (0)
11186
11187/**
11188 * Calls a FPU assembly implementation taking three visible arguments.
11189 *
11190 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11191 * @param a0 The first extra argument.
11192 * @param a1 The second extra argument.
11193 * @param a2 The third extra argument.
11194 */
11195#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11196 do { \
11197 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11198 } while (0)
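
/* Illustrative sketch (condensed from the typical st0/stN arithmetic bodies):
 * the FPU worker receives the x87 state, a result buffer and the two operand
 * references, with stack underflow handled when either register is empty.
 * The exact ordering of the raise/prepare macros is approximate.
 * @code
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */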
11199
11200#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11201 do { \
11202 (a_FpuData).FSW = (a_FSW); \
11203 (a_FpuData).r80Result = *(a_pr80Value); \
11204 } while (0)
11205
11206/** Pushes FPU result onto the stack. */
11207#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11208 iemFpuPushResult(pVCpu, &a_FpuData)
11209/** Pushes FPU result onto the stack and sets the FPUDP. */
11210#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11211 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11212
11213/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11214#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11215 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11216
11217/** Stores FPU result in a stack register. */
11218#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11219 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11220/** Stores FPU result in a stack register and pops the stack. */
11221#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11222 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11223/** Stores FPU result in a stack register and sets the FPUDP. */
11224#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11225 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11226/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11227 * stack. */
11228#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11229 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11230
11231/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11232#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11233 iemFpuUpdateOpcodeAndIp(pVCpu)
11234/** Free a stack register (for FFREE and FFREEP). */
11235#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11236 iemFpuStackFree(pVCpu, a_iStReg)
11237/** Increment the FPU stack pointer. */
11238#define IEM_MC_FPU_STACK_INC_TOP() \
11239 iemFpuStackIncTop(pVCpu)
11240/** Decrement the FPU stack pointer. */
11241#define IEM_MC_FPU_STACK_DEC_TOP() \
11242 iemFpuStackDecTop(pVCpu)
11243
11244/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11245#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11246 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11247/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11248#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11249 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11250/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11251#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11252 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11253/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11254#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11255 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11256/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11257 * stack. */
11258#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11259 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11260/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11261#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11262 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11263
11264/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11265#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11266 iemFpuStackUnderflow(pVCpu, a_iStDst)
11267/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11268 * stack. */
11269#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11270 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11271/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11272 * FPUDS. */
11273#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11274 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11275/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11276 * FPUDS. Pops stack. */
11277#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11278 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11279/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11280 * stack twice. */
11281#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11282 iemFpuStackUnderflowThenPopPop(pVCpu)
11283/** Raises a FPU stack underflow exception for an instruction pushing a result
11284 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11285#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11286 iemFpuStackPushUnderflow(pVCpu)
11287/** Raises a FPU stack underflow exception for an instruction pushing a result
11288 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11289#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11290 iemFpuStackPushUnderflowTwo(pVCpu)
11291
11292/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11293 * FPUIP, FPUCS and FOP. */
11294#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11295 iemFpuStackPushOverflow(pVCpu)
11296/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11297 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11298#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11299 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11300/** Prepares for using the FPU state.
11301 * Ensures that we can use the host FPU in the current context (RC+R0).
11302 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11303#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11304/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11305#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11306/** Actualizes the guest FPU state so it can be accessed and modified. */
11307#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11308
11309/** Prepares for using the SSE state.
11310 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11311 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11312#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11313/** Actualizes the guest XMM0..15 register state for read-only access. */
11314#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11315/** Actualizes the guest XMM0..15 register state for read-write access. */
11316#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11317
11318/**
11319 * Calls a MMX assembly implementation taking two visible arguments.
11320 *
11321 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11322 * @param a0 The first extra argument.
11323 * @param a1 The second extra argument.
11324 */
11325#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11326 do { \
11327 IEM_MC_PREPARE_FPU_USAGE(); \
11328 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11329 } while (0)
11330
11331/**
11332 * Calls a MMX assembly implementation taking three visible arguments.
11333 *
11334 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11335 * @param a0 The first extra argument.
11336 * @param a1 The second extra argument.
11337 * @param a2 The third extra argument.
11338 */
11339#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11340 do { \
11341 IEM_MC_PREPARE_FPU_USAGE(); \
11342 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11343 } while (0)
11344
11345
11346/**
11347 * Calls a SSE assembly implementation taking two visible arguments.
11348 *
11349 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11350 * @param a0 The first extra argument.
11351 * @param a1 The second extra argument.
11352 */
11353#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11354 do { \
11355 IEM_MC_PREPARE_SSE_USAGE(); \
11356 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11357 } while (0)
11358
11359/**
11360 * Calls a SSE assembly implementation taking three visible arguments.
11361 *
11362 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11363 * @param a0 The first extra argument.
11364 * @param a1 The second extra argument.
11365 * @param a2 The third extra argument.
11366 */
11367#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11368 do { \
11369 IEM_MC_PREPARE_SSE_USAGE(); \
11370 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11371 } while (0)
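
/* Illustrative sketch (register-to-register form, modelled on the generic
 * "full xmm, full xmm" SSE2 bodies): the worker receives the x87/SSE state
 * plus references to the destination and source XMM registers.  The exception
 * check macro used here is an assumption based on how other SSE2 instructions
 * are gated.
 * @code
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint128_t *,         pDst, 0);
 *      IEM_MC_ARG(uint128_t const *,   pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */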
11372
11373/** @note Not for IOPL or IF testing. */
11374#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11375/** @note Not for IOPL or IF testing. */
11376#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11377/** @note Not for IOPL or IF testing. */
11378#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11379/** @note Not for IOPL or IF testing. */
11380#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11381/** @note Not for IOPL or IF testing. */
11382#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11383 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11384 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11385/** @note Not for IOPL or IF testing. */
11386#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11387 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11388 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11389/** @note Not for IOPL or IF testing. */
11390#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11391 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11392 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11393 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11394/** @note Not for IOPL or IF testing. */
11395#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11396 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11397 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11398 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11399#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11400#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11401#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11402/** @note Not for IOPL or IF testing. */
11403#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11404 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11405 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11406/** @note Not for IOPL or IF testing. */
11407#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11408 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11409 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11410/** @note Not for IOPL or IF testing. */
11411#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11412 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11413 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11414/** @note Not for IOPL or IF testing. */
11415#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11416 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11417 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11418/** @note Not for IOPL or IF testing. */
11419#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11420 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11421 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11422/** @note Not for IOPL or IF testing. */
11423#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11424 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11425 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11426#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11427#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11428
11429#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11430 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11431#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11432 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11433#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11434 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11435#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11436 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11437#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11438 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11439#define IEM_MC_IF_FCW_IM() \
11440 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11441
11442#define IEM_MC_ELSE() } else {
11443#define IEM_MC_ENDIF() } do {} while (0)
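
/* Illustrative sketch: the IF/ELSE/ENDIF helpers expand to plain C blocks, so
 * a flag-conditional branch (e.g. a Jcc with an 8-bit displacement) decodes to
 * something along these lines.
 * @code
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 * @endcode
 */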
11444
11445/** @} */
11446
11447
11448/** @name Opcode Debug Helpers.
11449 * @{
11450 */
11451#ifdef VBOX_WITH_STATISTICS
11452# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11453#else
11454# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11455#endif
11456
11457#ifdef DEBUG
11458# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11459 do { \
11460 IEMOP_INC_STATS(a_Stats); \
11461 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11462 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11463 } while (0)
11464#else
11465# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11466#endif
11467
11468/** @} */
11469
11470
11471/** @name Opcode Helpers.
11472 * @{
11473 */
11474
11475#ifdef IN_RING3
11476# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11477 do { \
11478 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11479 else \
11480 { \
11481 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11482 return IEMOP_RAISE_INVALID_OPCODE(); \
11483 } \
11484 } while (0)
11485#else
11486# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11487 do { \
11488 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11489 else return IEMOP_RAISE_INVALID_OPCODE(); \
11490 } while (0)
11491#endif
11492
11493/** The instruction requires a 186 or later. */
11494#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11495# define IEMOP_HLP_MIN_186() do { } while (0)
11496#else
11497# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11498#endif
11499
11500/** The instruction requires a 286 or later. */
11501#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11502# define IEMOP_HLP_MIN_286() do { } while (0)
11503#else
11504# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11505#endif
11506
11507/** The instruction requires a 386 or later. */
11508#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11509# define IEMOP_HLP_MIN_386() do { } while (0)
11510#else
11511# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11512#endif
11513
11514/** The instruction requires a 386 or later if the given expression is true. */
11515#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11516# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11517#else
11518# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11519#endif
11520
11521/** The instruction requires a 486 or later. */
11522#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11523# define IEMOP_HLP_MIN_486() do { } while (0)
11524#else
11525# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11526#endif
11527
11528/** The instruction requires a Pentium (586) or later. */
11529#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11530# define IEMOP_HLP_MIN_586() do { } while (0)
11531#else
11532# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11533#endif
11534
11535/** The instruction requires a PentiumPro (686) or later. */
11536#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11537# define IEMOP_HLP_MIN_686() do { } while (0)
11538#else
11539# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11540#endif
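
/* Illustrative sketch: a decoder function gates itself on the minimum CPU
 * generation right after the mnemonic bookkeeping, e.g. BSWAP requiring a 486
 * or later (function and stats names shown here are approximate).
 * @code
 *      FNIEMOP_DEF(iemOp_bswap_rAX_r8)
 *      {
 *          IEMOP_MNEMONIC(bswap_rAX_r8, "bswap rAX/r8");
 *          IEMOP_HLP_MIN_486();
 *          ...
 *      }
 * @endcode
 */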
11541
11542
11543/** The instruction raises an \#UD in real and V8086 mode. */
11544#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11545 do \
11546 { \
11547 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11548 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11549 } while (0)
11550
11551/** The instruction is not available in 64-bit mode; throws \#UD if we're in
11552 * 64-bit mode. */
11553#define IEMOP_HLP_NO_64BIT() \
11554 do \
11555 { \
11556 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11557 return IEMOP_RAISE_INVALID_OPCODE(); \
11558 } while (0)
11559
11560/** The instruction is only available in 64-bit mode; throws \#UD if we're not in
11561 * 64-bit mode. */
11562#define IEMOP_HLP_ONLY_64BIT() \
11563 do \
11564 { \
11565 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11566 return IEMOP_RAISE_INVALID_OPCODE(); \
11567 } while (0)
11568
11569/** The instruction defaults to 64-bit operand size in 64-bit mode. */
11570#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11571 do \
11572 { \
11573 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11574 iemRecalEffOpSize64Default(pVCpu); \
11575 } while (0)
11576
11577/** The instruction has 64-bit operand size in 64-bit mode. */
11578#define IEMOP_HLP_64BIT_OP_SIZE() \
11579 do \
11580 { \
11581 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11582 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11583 } while (0)
11584
11585/** Only a REX prefix immediately preceding the first opcode byte takes
11586 * effect. This macro helps ensure this, as well as logging bad guest code. */
11587#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11588 do \
11589 { \
11590 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11591 { \
11592 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11593 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11594 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11595 pVCpu->iem.s.uRexB = 0; \
11596 pVCpu->iem.s.uRexIndex = 0; \
11597 pVCpu->iem.s.uRexReg = 0; \
11598 iemRecalEffOpSize(pVCpu); \
11599 } \
11600 } while (0)
11601
11602/**
11603 * Done decoding.
11604 */
11605#define IEMOP_HLP_DONE_DECODING() \
11606 do \
11607 { \
11608 /*nothing for now, maybe later... */ \
11609 } while (0)
11610
11611/**
11612 * Done decoding, raise \#UD exception if lock prefix present.
11613 */
11614#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11615 do \
11616 { \
11617 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11618 { /* likely */ } \
11619 else \
11620 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11621 } while (0)
11622#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11623 do \
11624 { \
11625 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11626 { /* likely */ } \
11627 else \
11628 { \
11629 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11630 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11631 } \
11632 } while (0)
11633#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11634 do \
11635 { \
11636 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11637 { /* likely */ } \
11638 else \
11639 { \
11640 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11641 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11642 } \
11643 } while (0)
11644
11645/**
11646 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11647 * are present.
11648 */
11649#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11650 do \
11651 { \
11652 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11653 { /* likely */ } \
11654 else \
11655 return IEMOP_RAISE_INVALID_OPCODE(); \
11656 } while (0)
11657
11658
11659/**
11660 * Calculates the effective address of a ModR/M memory operand.
11661 *
11662 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11663 *
11664 * @return Strict VBox status code.
11665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11666 * @param bRm The ModRM byte.
11667 * @param cbImm The size of any immediate following the
11668 * effective address opcode bytes. Important for
11669 * RIP relative addressing.
11670 * @param pGCPtrEff Where to return the effective address.
11671 */
11672IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11673{
11674 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11675 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11676# define SET_SS_DEF() \
11677 do \
11678 { \
11679 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11680 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11681 } while (0)
11682
11683 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11684 {
11685/** @todo Check the effective address size crap! */
11686 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11687 {
11688 uint16_t u16EffAddr;
11689
11690 /* Handle the disp16 form with no registers first. */
11691 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11692 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11693 else
11694 {
11695                /* Get the displacement. */
11696 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11697 {
11698 case 0: u16EffAddr = 0; break;
11699 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11700 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11701 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11702 }
11703
11704 /* Add the base and index registers to the disp. */
11705 switch (bRm & X86_MODRM_RM_MASK)
11706 {
11707 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11708 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11709 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11710 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11711 case 4: u16EffAddr += pCtx->si; break;
11712 case 5: u16EffAddr += pCtx->di; break;
11713 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11714 case 7: u16EffAddr += pCtx->bx; break;
11715 }
11716 }
11717
11718 *pGCPtrEff = u16EffAddr;
11719 }
11720 else
11721 {
11722 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11723 uint32_t u32EffAddr;
11724
11725 /* Handle the disp32 form with no registers first. */
11726 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11727 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11728 else
11729 {
11730 /* Get the register (or SIB) value. */
11731 switch ((bRm & X86_MODRM_RM_MASK))
11732 {
11733 case 0: u32EffAddr = pCtx->eax; break;
11734 case 1: u32EffAddr = pCtx->ecx; break;
11735 case 2: u32EffAddr = pCtx->edx; break;
11736 case 3: u32EffAddr = pCtx->ebx; break;
11737 case 4: /* SIB */
11738 {
11739 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11740
11741 /* Get the index and scale it. */
11742 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11743 {
11744 case 0: u32EffAddr = pCtx->eax; break;
11745 case 1: u32EffAddr = pCtx->ecx; break;
11746 case 2: u32EffAddr = pCtx->edx; break;
11747 case 3: u32EffAddr = pCtx->ebx; break;
11748 case 4: u32EffAddr = 0; /*none */ break;
11749 case 5: u32EffAddr = pCtx->ebp; break;
11750 case 6: u32EffAddr = pCtx->esi; break;
11751 case 7: u32EffAddr = pCtx->edi; break;
11752 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11753 }
11754 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11755
11756 /* add base */
11757 switch (bSib & X86_SIB_BASE_MASK)
11758 {
11759 case 0: u32EffAddr += pCtx->eax; break;
11760 case 1: u32EffAddr += pCtx->ecx; break;
11761 case 2: u32EffAddr += pCtx->edx; break;
11762 case 3: u32EffAddr += pCtx->ebx; break;
11763 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11764 case 5:
11765 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11766 {
11767 u32EffAddr += pCtx->ebp;
11768 SET_SS_DEF();
11769 }
11770 else
11771 {
11772 uint32_t u32Disp;
11773 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11774 u32EffAddr += u32Disp;
11775 }
11776 break;
11777 case 6: u32EffAddr += pCtx->esi; break;
11778 case 7: u32EffAddr += pCtx->edi; break;
11779 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11780 }
11781 break;
11782 }
11783 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11784 case 6: u32EffAddr = pCtx->esi; break;
11785 case 7: u32EffAddr = pCtx->edi; break;
11786 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11787 }
11788
11789 /* Get and add the displacement. */
11790 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11791 {
11792 case 0:
11793 break;
11794 case 1:
11795 {
11796 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11797 u32EffAddr += i8Disp;
11798 break;
11799 }
11800 case 2:
11801 {
11802 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11803 u32EffAddr += u32Disp;
11804 break;
11805 }
11806 default:
11807 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11808 }
11809
11810 }
11811 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11812 *pGCPtrEff = u32EffAddr;
11813 else
11814 {
11815 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11816 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11817 }
11818 }
11819 }
11820 else
11821 {
11822 uint64_t u64EffAddr;
11823
11824 /* Handle the rip+disp32 form with no registers first. */
11825 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11826 {
11827 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11828 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11829 }
11830 else
11831 {
11832 /* Get the register (or SIB) value. */
11833 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11834 {
11835 case 0: u64EffAddr = pCtx->rax; break;
11836 case 1: u64EffAddr = pCtx->rcx; break;
11837 case 2: u64EffAddr = pCtx->rdx; break;
11838 case 3: u64EffAddr = pCtx->rbx; break;
11839 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11840 case 6: u64EffAddr = pCtx->rsi; break;
11841 case 7: u64EffAddr = pCtx->rdi; break;
11842 case 8: u64EffAddr = pCtx->r8; break;
11843 case 9: u64EffAddr = pCtx->r9; break;
11844 case 10: u64EffAddr = pCtx->r10; break;
11845 case 11: u64EffAddr = pCtx->r11; break;
11846 case 13: u64EffAddr = pCtx->r13; break;
11847 case 14: u64EffAddr = pCtx->r14; break;
11848 case 15: u64EffAddr = pCtx->r15; break;
11849 /* SIB */
11850 case 4:
11851 case 12:
11852 {
11853 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11854
11855 /* Get the index and scale it. */
11856 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11857 {
11858 case 0: u64EffAddr = pCtx->rax; break;
11859 case 1: u64EffAddr = pCtx->rcx; break;
11860 case 2: u64EffAddr = pCtx->rdx; break;
11861 case 3: u64EffAddr = pCtx->rbx; break;
11862 case 4: u64EffAddr = 0; /*none */ break;
11863 case 5: u64EffAddr = pCtx->rbp; break;
11864 case 6: u64EffAddr = pCtx->rsi; break;
11865 case 7: u64EffAddr = pCtx->rdi; break;
11866 case 8: u64EffAddr = pCtx->r8; break;
11867 case 9: u64EffAddr = pCtx->r9; break;
11868 case 10: u64EffAddr = pCtx->r10; break;
11869 case 11: u64EffAddr = pCtx->r11; break;
11870 case 12: u64EffAddr = pCtx->r12; break;
11871 case 13: u64EffAddr = pCtx->r13; break;
11872 case 14: u64EffAddr = pCtx->r14; break;
11873 case 15: u64EffAddr = pCtx->r15; break;
11874 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11875 }
11876 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11877
11878 /* add base */
11879 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11880 {
11881 case 0: u64EffAddr += pCtx->rax; break;
11882 case 1: u64EffAddr += pCtx->rcx; break;
11883 case 2: u64EffAddr += pCtx->rdx; break;
11884 case 3: u64EffAddr += pCtx->rbx; break;
11885 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11886 case 6: u64EffAddr += pCtx->rsi; break;
11887 case 7: u64EffAddr += pCtx->rdi; break;
11888 case 8: u64EffAddr += pCtx->r8; break;
11889 case 9: u64EffAddr += pCtx->r9; break;
11890 case 10: u64EffAddr += pCtx->r10; break;
11891 case 11: u64EffAddr += pCtx->r11; break;
11892 case 12: u64EffAddr += pCtx->r12; break;
11893 case 14: u64EffAddr += pCtx->r14; break;
11894 case 15: u64EffAddr += pCtx->r15; break;
11895 /* complicated encodings */
11896 case 5:
11897 case 13:
11898 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11899 {
11900 if (!pVCpu->iem.s.uRexB)
11901 {
11902 u64EffAddr += pCtx->rbp;
11903 SET_SS_DEF();
11904 }
11905 else
11906 u64EffAddr += pCtx->r13;
11907 }
11908 else
11909 {
11910 uint32_t u32Disp;
11911 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11912 u64EffAddr += (int32_t)u32Disp;
11913 }
11914 break;
11915 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11916 }
11917 break;
11918 }
11919 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11920 }
11921
11922 /* Get and add the displacement. */
11923 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11924 {
11925 case 0:
11926 break;
11927 case 1:
11928 {
11929 int8_t i8Disp;
11930 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11931 u64EffAddr += i8Disp;
11932 break;
11933 }
11934 case 2:
11935 {
11936 uint32_t u32Disp;
11937 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11938 u64EffAddr += (int32_t)u32Disp;
11939 break;
11940 }
11941 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11942 }
11943
11944 }
11945
11946 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11947 *pGCPtrEff = u64EffAddr;
11948 else
11949 {
11950 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11951 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11952 }
11953 }
11954
11955 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11956 return VINF_SUCCESS;
11957}
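
/* Worked example (informal): with 16-bit addressing, bRm=0x46 decodes as
 * mod=1, rm=6, i.e. [bp+disp8]; for BP=0x1000 and disp8=0x10 the function
 * returns 0x1010 and, absent a segment prefix, SET_SS_DEF() switches the
 * effective segment to SS.  With 32-bit addressing, bRm=0x04 plus SIB byte
 * 0x88 (scale field 2, i.e. x4; index=ECX; base=EAX) yields EAX + ECX*4 plus
 * whatever displacement the mod field selects. */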
11958
11959
11960/**
11961 * Calculates the effective address of a ModR/M memory operand.
11962 *
11963 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11964 *
11965 * @return Strict VBox status code.
11966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11967 * @param bRm The ModRM byte.
11968 * @param cbImm The size of any immediate following the
11969 * effective address opcode bytes. Important for
11970 * RIP relative addressing.
11971 * @param pGCPtrEff Where to return the effective address.
11972 * @param offRsp RSP displacement.
11973 */
11974IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11975{
11976    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11977 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11978# define SET_SS_DEF() \
11979 do \
11980 { \
11981 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11982 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11983 } while (0)
11984
11985 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11986 {
11987/** @todo Check the effective address size crap! */
11988 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11989 {
11990 uint16_t u16EffAddr;
11991
11992 /* Handle the disp16 form with no registers first. */
11993 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11994 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11995 else
11996 {
11997                /* Get the displacement. */
11998 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11999 {
12000 case 0: u16EffAddr = 0; break;
12001 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12002 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12003 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12004 }
12005
12006 /* Add the base and index registers to the disp. */
12007 switch (bRm & X86_MODRM_RM_MASK)
12008 {
12009 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12010 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12011 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12012 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12013 case 4: u16EffAddr += pCtx->si; break;
12014 case 5: u16EffAddr += pCtx->di; break;
12015 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12016 case 7: u16EffAddr += pCtx->bx; break;
12017 }
12018 }
12019
12020 *pGCPtrEff = u16EffAddr;
12021 }
12022 else
12023 {
12024 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12025 uint32_t u32EffAddr;
12026
12027 /* Handle the disp32 form with no registers first. */
12028 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12029 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12030 else
12031 {
12032 /* Get the register (or SIB) value. */
12033 switch ((bRm & X86_MODRM_RM_MASK))
12034 {
12035 case 0: u32EffAddr = pCtx->eax; break;
12036 case 1: u32EffAddr = pCtx->ecx; break;
12037 case 2: u32EffAddr = pCtx->edx; break;
12038 case 3: u32EffAddr = pCtx->ebx; break;
12039 case 4: /* SIB */
12040 {
12041 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12042
12043 /* Get the index and scale it. */
12044 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12045 {
12046 case 0: u32EffAddr = pCtx->eax; break;
12047 case 1: u32EffAddr = pCtx->ecx; break;
12048 case 2: u32EffAddr = pCtx->edx; break;
12049 case 3: u32EffAddr = pCtx->ebx; break;
12050 case 4: u32EffAddr = 0; /*none */ break;
12051 case 5: u32EffAddr = pCtx->ebp; break;
12052 case 6: u32EffAddr = pCtx->esi; break;
12053 case 7: u32EffAddr = pCtx->edi; break;
12054 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12055 }
12056 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12057
12058 /* add base */
12059 switch (bSib & X86_SIB_BASE_MASK)
12060 {
12061 case 0: u32EffAddr += pCtx->eax; break;
12062 case 1: u32EffAddr += pCtx->ecx; break;
12063 case 2: u32EffAddr += pCtx->edx; break;
12064 case 3: u32EffAddr += pCtx->ebx; break;
12065 case 4:
12066 u32EffAddr += pCtx->esp + offRsp;
12067 SET_SS_DEF();
12068 break;
12069 case 5:
12070 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12071 {
12072 u32EffAddr += pCtx->ebp;
12073 SET_SS_DEF();
12074 }
12075 else
12076 {
12077 uint32_t u32Disp;
12078 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12079 u32EffAddr += u32Disp;
12080 }
12081 break;
12082 case 6: u32EffAddr += pCtx->esi; break;
12083 case 7: u32EffAddr += pCtx->edi; break;
12084 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12085 }
12086 break;
12087 }
12088 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12089 case 6: u32EffAddr = pCtx->esi; break;
12090 case 7: u32EffAddr = pCtx->edi; break;
12091 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12092 }
12093
12094 /* Get and add the displacement. */
12095 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12096 {
12097 case 0:
12098 break;
12099 case 1:
12100 {
12101 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12102 u32EffAddr += i8Disp;
12103 break;
12104 }
12105 case 2:
12106 {
12107 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12108 u32EffAddr += u32Disp;
12109 break;
12110 }
12111 default:
12112 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12113 }
12114
12115 }
12116 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12117 *pGCPtrEff = u32EffAddr;
12118 else
12119 {
12120 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12121 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12122 }
12123 }
12124 }
12125 else
12126 {
12127 uint64_t u64EffAddr;
12128
12129 /* Handle the rip+disp32 form with no registers first. */
12130 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12131 {
12132 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12133 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12134 }
12135 else
12136 {
12137 /* Get the register (or SIB) value. */
12138 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12139 {
12140 case 0: u64EffAddr = pCtx->rax; break;
12141 case 1: u64EffAddr = pCtx->rcx; break;
12142 case 2: u64EffAddr = pCtx->rdx; break;
12143 case 3: u64EffAddr = pCtx->rbx; break;
12144 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12145 case 6: u64EffAddr = pCtx->rsi; break;
12146 case 7: u64EffAddr = pCtx->rdi; break;
12147 case 8: u64EffAddr = pCtx->r8; break;
12148 case 9: u64EffAddr = pCtx->r9; break;
12149 case 10: u64EffAddr = pCtx->r10; break;
12150 case 11: u64EffAddr = pCtx->r11; break;
12151 case 13: u64EffAddr = pCtx->r13; break;
12152 case 14: u64EffAddr = pCtx->r14; break;
12153 case 15: u64EffAddr = pCtx->r15; break;
12154 /* SIB */
12155 case 4:
12156 case 12:
12157 {
12158 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12159
12160 /* Get the index and scale it. */
12161 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12162 {
12163 case 0: u64EffAddr = pCtx->rax; break;
12164 case 1: u64EffAddr = pCtx->rcx; break;
12165 case 2: u64EffAddr = pCtx->rdx; break;
12166 case 3: u64EffAddr = pCtx->rbx; break;
12167 case 4: u64EffAddr = 0; /*none */ break;
12168 case 5: u64EffAddr = pCtx->rbp; break;
12169 case 6: u64EffAddr = pCtx->rsi; break;
12170 case 7: u64EffAddr = pCtx->rdi; break;
12171 case 8: u64EffAddr = pCtx->r8; break;
12172 case 9: u64EffAddr = pCtx->r9; break;
12173 case 10: u64EffAddr = pCtx->r10; break;
12174 case 11: u64EffAddr = pCtx->r11; break;
12175 case 12: u64EffAddr = pCtx->r12; break;
12176 case 13: u64EffAddr = pCtx->r13; break;
12177 case 14: u64EffAddr = pCtx->r14; break;
12178 case 15: u64EffAddr = pCtx->r15; break;
12179 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12180 }
12181 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12182
12183 /* add base */
12184 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12185 {
12186 case 0: u64EffAddr += pCtx->rax; break;
12187 case 1: u64EffAddr += pCtx->rcx; break;
12188 case 2: u64EffAddr += pCtx->rdx; break;
12189 case 3: u64EffAddr += pCtx->rbx; break;
12190 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12191 case 6: u64EffAddr += pCtx->rsi; break;
12192 case 7: u64EffAddr += pCtx->rdi; break;
12193 case 8: u64EffAddr += pCtx->r8; break;
12194 case 9: u64EffAddr += pCtx->r9; break;
12195 case 10: u64EffAddr += pCtx->r10; break;
12196 case 11: u64EffAddr += pCtx->r11; break;
12197 case 12: u64EffAddr += pCtx->r12; break;
12198 case 14: u64EffAddr += pCtx->r14; break;
12199 case 15: u64EffAddr += pCtx->r15; break;
12200 /* complicated encodings */
12201 case 5:
12202 case 13:
12203 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12204 {
12205 if (!pVCpu->iem.s.uRexB)
12206 {
12207 u64EffAddr += pCtx->rbp;
12208 SET_SS_DEF();
12209 }
12210 else
12211 u64EffAddr += pCtx->r13;
12212 }
12213 else
12214 {
12215 uint32_t u32Disp;
12216 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12217 u64EffAddr += (int32_t)u32Disp;
12218 }
12219 break;
12220 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12221 }
12222 break;
12223 }
12224 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12225 }
12226
12227 /* Get and add the displacement. */
12228 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12229 {
12230 case 0:
12231 break;
12232 case 1:
12233 {
12234 int8_t i8Disp;
12235 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12236 u64EffAddr += i8Disp;
12237 break;
12238 }
12239 case 2:
12240 {
12241 uint32_t u32Disp;
12242 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12243 u64EffAddr += (int32_t)u32Disp;
12244 break;
12245 }
12246 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12247 }
12248
12249 }
12250
12251 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12252 *pGCPtrEff = u64EffAddr;
12253 else
12254 {
12255 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12256 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12257 }
12258 }
12259
12260 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12261 return VINF_SUCCESS;
12262}
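
/*
 * Editor's note: a minimal sketch (not built, kept inside #if 0) of the 16-bit
 * ModR/M effective address arithmetic implemented above, using plain variables
 * instead of the IEM opcode fetch macros so the disp16 special case and the
 * base+index table are easier to follow.  The helper name and register values
 * are hypothetical.
 */
#if 0
static uint16_t iemSketchCalc16BitEffAddr(uint8_t bRm, uint16_t u16Disp,
                                          uint16_t bx, uint16_t bp, uint16_t si, uint16_t di)
{
    /* mod=00 with r/m=110 is the special "disp16 only" form (no base/index). */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
        return u16Disp;

    uint16_t u16EffAddr = u16Disp; /* 0, sign-extended disp8 or disp16, depending on mod. */
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u16EffAddr += bx + si; break;
        case 1: u16EffAddr += bx + di; break;
        case 2: u16EffAddr += bp + si; break; /* SS is the default segment here. */
        case 3: u16EffAddr += bp + di; break; /* SS is the default segment here. */
        case 4: u16EffAddr += si;      break;
        case 5: u16EffAddr += di;      break;
        case 6: u16EffAddr += bp;      break; /* SS is the default segment here. */
        case 7: u16EffAddr += bx;      break;
    }
    return u16EffAddr; /* E.g. bRm=0x46 (mod=01, r/m=110), disp8=0x10, bp=0x2000 -> 0x2010. */
}
#endif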
12263
12264
12265#ifdef IEM_WITH_SETJMP
12266/**
12267 * Calculates the effective address of a ModR/M memory operand.
12268 *
12269 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12270 *
12271 * May longjmp on internal error.
12272 *
12273 * @return The effective address.
12274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12275 * @param bRm The ModRM byte.
12276 * @param cbImm The size of any immediate following the
12277 * effective address opcode bytes. Important for
12278 * RIP relative addressing.
12279 */
12280IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12281{
12282 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12283 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12284# define SET_SS_DEF() \
12285 do \
12286 { \
12287 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12288 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12289 } while (0)
12290
12291 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12292 {
12293/** @todo Check the effective address size crap! */
12294 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12295 {
12296 uint16_t u16EffAddr;
12297
12298 /* Handle the disp16 form with no registers first. */
12299 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12300 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12301 else
12302 {
12303 /* Get the displacement. */
12304 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12305 {
12306 case 0: u16EffAddr = 0; break;
12307 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12308 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12309 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12310 }
12311
12312 /* Add the base and index registers to the disp. */
12313 switch (bRm & X86_MODRM_RM_MASK)
12314 {
12315 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12316 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12317 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12318 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12319 case 4: u16EffAddr += pCtx->si; break;
12320 case 5: u16EffAddr += pCtx->di; break;
12321 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12322 case 7: u16EffAddr += pCtx->bx; break;
12323 }
12324 }
12325
12326 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12327 return u16EffAddr;
12328 }
12329
12330 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12331 uint32_t u32EffAddr;
12332
12333 /* Handle the disp32 form with no registers first. */
12334 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12335 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12336 else
12337 {
12338 /* Get the register (or SIB) value. */
12339 switch ((bRm & X86_MODRM_RM_MASK))
12340 {
12341 case 0: u32EffAddr = pCtx->eax; break;
12342 case 1: u32EffAddr = pCtx->ecx; break;
12343 case 2: u32EffAddr = pCtx->edx; break;
12344 case 3: u32EffAddr = pCtx->ebx; break;
12345 case 4: /* SIB */
12346 {
12347 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12348
12349 /* Get the index and scale it. */
12350 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12351 {
12352 case 0: u32EffAddr = pCtx->eax; break;
12353 case 1: u32EffAddr = pCtx->ecx; break;
12354 case 2: u32EffAddr = pCtx->edx; break;
12355 case 3: u32EffAddr = pCtx->ebx; break;
12356 case 4: u32EffAddr = 0; /*none */ break;
12357 case 5: u32EffAddr = pCtx->ebp; break;
12358 case 6: u32EffAddr = pCtx->esi; break;
12359 case 7: u32EffAddr = pCtx->edi; break;
12360 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12361 }
12362 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12363
12364 /* add base */
12365 switch (bSib & X86_SIB_BASE_MASK)
12366 {
12367 case 0: u32EffAddr += pCtx->eax; break;
12368 case 1: u32EffAddr += pCtx->ecx; break;
12369 case 2: u32EffAddr += pCtx->edx; break;
12370 case 3: u32EffAddr += pCtx->ebx; break;
12371 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12372 case 5:
12373 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12374 {
12375 u32EffAddr += pCtx->ebp;
12376 SET_SS_DEF();
12377 }
12378 else
12379 {
12380 uint32_t u32Disp;
12381 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12382 u32EffAddr += u32Disp;
12383 }
12384 break;
12385 case 6: u32EffAddr += pCtx->esi; break;
12386 case 7: u32EffAddr += pCtx->edi; break;
12387 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12388 }
12389 break;
12390 }
12391 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12392 case 6: u32EffAddr = pCtx->esi; break;
12393 case 7: u32EffAddr = pCtx->edi; break;
12394 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12395 }
12396
12397 /* Get and add the displacement. */
12398 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12399 {
12400 case 0:
12401 break;
12402 case 1:
12403 {
12404 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12405 u32EffAddr += i8Disp;
12406 break;
12407 }
12408 case 2:
12409 {
12410 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12411 u32EffAddr += u32Disp;
12412 break;
12413 }
12414 default:
12415 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12416 }
12417 }
12418
12419 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12420 {
12421 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12422 return u32EffAddr;
12423 }
12424 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12425 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12426 return u32EffAddr & UINT16_MAX;
12427 }
12428
12429 uint64_t u64EffAddr;
12430
12431 /* Handle the rip+disp32 form with no registers first. */
12432 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12433 {
12434 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12435 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12436 }
12437 else
12438 {
12439 /* Get the register (or SIB) value. */
12440 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12441 {
12442 case 0: u64EffAddr = pCtx->rax; break;
12443 case 1: u64EffAddr = pCtx->rcx; break;
12444 case 2: u64EffAddr = pCtx->rdx; break;
12445 case 3: u64EffAddr = pCtx->rbx; break;
12446 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12447 case 6: u64EffAddr = pCtx->rsi; break;
12448 case 7: u64EffAddr = pCtx->rdi; break;
12449 case 8: u64EffAddr = pCtx->r8; break;
12450 case 9: u64EffAddr = pCtx->r9; break;
12451 case 10: u64EffAddr = pCtx->r10; break;
12452 case 11: u64EffAddr = pCtx->r11; break;
12453 case 13: u64EffAddr = pCtx->r13; break;
12454 case 14: u64EffAddr = pCtx->r14; break;
12455 case 15: u64EffAddr = pCtx->r15; break;
12456 /* SIB */
12457 case 4:
12458 case 12:
12459 {
12460 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12461
12462 /* Get the index and scale it. */
12463 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12464 {
12465 case 0: u64EffAddr = pCtx->rax; break;
12466 case 1: u64EffAddr = pCtx->rcx; break;
12467 case 2: u64EffAddr = pCtx->rdx; break;
12468 case 3: u64EffAddr = pCtx->rbx; break;
12469 case 4: u64EffAddr = 0; /*none */ break;
12470 case 5: u64EffAddr = pCtx->rbp; break;
12471 case 6: u64EffAddr = pCtx->rsi; break;
12472 case 7: u64EffAddr = pCtx->rdi; break;
12473 case 8: u64EffAddr = pCtx->r8; break;
12474 case 9: u64EffAddr = pCtx->r9; break;
12475 case 10: u64EffAddr = pCtx->r10; break;
12476 case 11: u64EffAddr = pCtx->r11; break;
12477 case 12: u64EffAddr = pCtx->r12; break;
12478 case 13: u64EffAddr = pCtx->r13; break;
12479 case 14: u64EffAddr = pCtx->r14; break;
12480 case 15: u64EffAddr = pCtx->r15; break;
12481 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12482 }
12483 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12484
12485 /* add base */
12486 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12487 {
12488 case 0: u64EffAddr += pCtx->rax; break;
12489 case 1: u64EffAddr += pCtx->rcx; break;
12490 case 2: u64EffAddr += pCtx->rdx; break;
12491 case 3: u64EffAddr += pCtx->rbx; break;
12492 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12493 case 6: u64EffAddr += pCtx->rsi; break;
12494 case 7: u64EffAddr += pCtx->rdi; break;
12495 case 8: u64EffAddr += pCtx->r8; break;
12496 case 9: u64EffAddr += pCtx->r9; break;
12497 case 10: u64EffAddr += pCtx->r10; break;
12498 case 11: u64EffAddr += pCtx->r11; break;
12499 case 12: u64EffAddr += pCtx->r12; break;
12500 case 14: u64EffAddr += pCtx->r14; break;
12501 case 15: u64EffAddr += pCtx->r15; break;
12502 /* complicated encodings */
12503 case 5:
12504 case 13:
12505 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12506 {
12507 if (!pVCpu->iem.s.uRexB)
12508 {
12509 u64EffAddr += pCtx->rbp;
12510 SET_SS_DEF();
12511 }
12512 else
12513 u64EffAddr += pCtx->r13;
12514 }
12515 else
12516 {
12517 uint32_t u32Disp;
12518 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12519 u64EffAddr += (int32_t)u32Disp;
12520 }
12521 break;
12522 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12523 }
12524 break;
12525 }
12526 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12527 }
12528
12529 /* Get and add the displacement. */
12530 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12531 {
12532 case 0:
12533 break;
12534 case 1:
12535 {
12536 int8_t i8Disp;
12537 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12538 u64EffAddr += i8Disp;
12539 break;
12540 }
12541 case 2:
12542 {
12543 uint32_t u32Disp;
12544 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12545 u64EffAddr += (int32_t)u32Disp;
12546 break;
12547 }
12548 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12549 }
12550
12551 }
12552
12553 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12554 {
12555 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12556 return u64EffAddr;
12557 }
12558 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12559 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12560 return u64EffAddr & UINT32_MAX;
12561}
12562#endif /* IEM_WITH_SETJMP */
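
/*
 * Editor's note: a minimal sketch (not built, kept inside #if 0) of why the
 * cbImm parameter matters for the rip+disp32 form handled above: the
 * displacement is relative to the *next* instruction, so any immediate bytes
 * that still follow the displacement have to be counted in as well.  The
 * helper name and values are hypothetical.
 */
#if 0
static uint64_t iemSketchRipRelative(uint64_t uRip, uint8_t cbDecodedSoFar, uint8_t cbImm, int32_t i32Disp)
{
    /* For e.g. "mov dword [rip+disp32], imm32" the four immediate bytes come
       after the disp32, so the next-RIP is: current RIP + bytes decoded so far
       (prefixes, opcode, ModR/M, disp32) + the pending immediate. */
    uint64_t const uNextRip = uRip + cbDecodedSoFar + cbImm;
    return uNextRip + (int64_t)i32Disp;
}
#endif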
12563
12564
12565/** @} */
12566
12567
12568
12569/*
12570 * Include the instructions
12571 */
12572#include "IEMAllInstructions.cpp.h"
12573
12574
12575
12576
12577#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12578
12579/**
12580 * Sets up execution verification mode.
12581 */
12582IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12583{
12584
12585 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12586
12587 /*
12588 * Always note down the address of the current instruction.
12589 */
12590 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12591 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12592
12593 /*
12594 * Enable verification and/or logging.
12595 */
12596 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12597 if ( fNewNoRem
12598 && ( 0
12599#if 0 /* auto enable on first paged protected mode interrupt */
12600 || ( pOrgCtx->eflags.Bits.u1IF
12601 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12602 && TRPMHasTrap(pVCpu)
12603 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12604#endif
12605#if 0
12606 || ( pOrgCtx->cs.Sel == 0x10
12607 && ( pOrgCtx->rip == 0x90119e3e
12608 || pOrgCtx->rip == 0x901d9810))
12609#endif
12610#if 0 /* Auto enable DSL - FPU stuff. */
12611 || ( pOrgCtx->cs.Sel == 0x10
12612 && (// pOrgCtx->rip == 0xc02ec07f
12613 //|| pOrgCtx->rip == 0xc02ec082
12614 //|| pOrgCtx->rip == 0xc02ec0c9
12615 0
12616 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12617#endif
12618#if 0 /* Auto enable DSL - fstp st0 stuff. */
12619 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12620#endif
12621#if 0
12622 || pOrgCtx->rip == 0x9022bb3a
12623#endif
12624#if 0
12625 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12626#endif
12627#if 0
12628 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12629 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12630#endif
12631#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12632 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12633 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12634 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12635#endif
12636#if 0 /* NT4SP1 - xadd early boot. */
12637 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12638#endif
12639#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12640 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12641#endif
12642#if 0 /* NT4SP1 - cmpxchg (AMD). */
12643 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12644#endif
12645#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12646 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12647#endif
12648#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12649 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12650
12651#endif
12652#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12653 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12654
12655#endif
12656#if 0 /* NT4SP1 - frstor [ecx] */
12657 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12658#endif
12659#if 0 /* xxxxxx - All long mode code. */
12660 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12661#endif
12662#if 0 /* rep movsq linux 3.7 64-bit boot. */
12663 || (pOrgCtx->rip == 0x0000000000100241)
12664#endif
12665#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12666 || (pOrgCtx->rip == 0x000000000215e240)
12667#endif
12668#if 0 /* DOS's size-overridden iret to v8086. */
12669 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12670#endif
12671 )
12672 )
12673 {
12674 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12675 RTLogFlags(NULL, "enabled");
12676 fNewNoRem = false;
12677 }
12678 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12679 {
12680 pVCpu->iem.s.fNoRem = fNewNoRem;
12681 if (!fNewNoRem)
12682 {
12683 LogAlways(("Enabling verification mode!\n"));
12684 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12685 }
12686 else
12687 LogAlways(("Disabling verification mode!\n"));
12688 }
12689
12690 /*
12691 * Switch state.
12692 */
12693 if (IEM_VERIFICATION_ENABLED(pVCpu))
12694 {
12695 static CPUMCTX s_DebugCtx; /* Ugly! */
12696
12697 s_DebugCtx = *pOrgCtx;
12698 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12699 }
12700
12701 /*
12702 * See if there is an interrupt pending in TRPM and inject it if we can.
12703 */
12704 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12705 if ( pOrgCtx->eflags.Bits.u1IF
12706 && TRPMHasTrap(pVCpu)
12707 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12708 {
12709 uint8_t u8TrapNo;
12710 TRPMEVENT enmType;
12711 RTGCUINT uErrCode;
12712 RTGCPTR uCr2;
12713 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12714 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12715 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12716 TRPMResetTrap(pVCpu);
12717 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12718 }
12719
12720 /*
12721 * Reset the counters.
12722 */
12723 pVCpu->iem.s.cIOReads = 0;
12724 pVCpu->iem.s.cIOWrites = 0;
12725 pVCpu->iem.s.fIgnoreRaxRdx = false;
12726 pVCpu->iem.s.fOverlappingMovs = false;
12727 pVCpu->iem.s.fProblematicMemory = false;
12728 pVCpu->iem.s.fUndefinedEFlags = 0;
12729
12730 if (IEM_VERIFICATION_ENABLED(pVCpu))
12731 {
12732 /*
12733 * Free all verification records.
12734 */
12735 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12736 pVCpu->iem.s.pIemEvtRecHead = NULL;
12737 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12738 do
12739 {
12740 while (pEvtRec)
12741 {
12742 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12743 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12744 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12745 pEvtRec = pNext;
12746 }
12747 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12748 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12749 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12750 } while (pEvtRec);
12751 }
12752}
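
/*
 * Editor's note: a minimal sketch (not built, kept inside #if 0) of the overall
 * verification idea set up above: IEM executes against a private copy of the
 * guest context (s_DebugCtx) while the reference engine (REM or HM) later
 * executes the same instruction against the original, and the two states are
 * diffed in iemExecVerificationModeCheck().  The helper name is hypothetical
 * and the execute/compare steps are only indicated by comments.
 */
#if 0
static void iemSketchVerifyOneInstruction(PCPUMCTX pOrgCtx)
{
    CPUMCTX DebugCtx = *pOrgCtx;    /* 1. Snapshot the guest state.                                */
    /* 2. Execute the instruction in IEM against &DebugCtx.                                        */
    /* 3. Execute the same instruction in REM/HM against *pOrgCtx.                                 */
    /* 4. Compare DebugCtx with *pOrgCtx and the recorded I/O / RAM event lists.                   */
    NOREF(DebugCtx);
}
#endif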
12753
12754
12755/**
12756 * Allocate an event record.
12757 * @returns Pointer to a record.
12758 */
12759IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12760{
12761 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12762 return NULL;
12763
12764 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12765 if (pEvtRec)
12766 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12767 else
12768 {
12769 if (!pVCpu->iem.s.ppIemEvtRecNext)
12770 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12771
12772 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12773 if (!pEvtRec)
12774 return NULL;
12775 }
12776 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12777 pEvtRec->pNext = NULL;
12778 return pEvtRec;
12779}
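
/*
 * Editor's note: a minimal sketch (not built, kept inside #if 0) of the
 * free-list pattern used by iemVerifyAllocRecord() above: allocation pops the
 * head of a singly linked list of recycled records and only hits the heap when
 * the list is empty; releasing a record simply pushes it back.  The struct and
 * helper names are hypothetical.
 */
#if 0
typedef struct IEMSKETCHREC
{
    struct IEMSKETCHREC *pNext;
    int                  iPayload;
} IEMSKETCHREC;

static IEMSKETCHREC *iemSketchAllocRec(IEMSKETCHREC **ppFreeHead)
{
    IEMSKETCHREC *pRec = *ppFreeHead;
    if (pRec)
        *ppFreeHead = pRec->pNext;  /* Fast path: reuse a previously released record. */
    /* else: fall back to a real heap allocation (MMR3HeapAlloc in the code above).   */
    return pRec;
}

static void iemSketchFreeRec(IEMSKETCHREC **ppFreeHead, IEMSKETCHREC *pRec)
{
    pRec->pNext = *ppFreeHead;      /* Push back onto the free list for reuse. */
    *ppFreeHead = pRec;
}
#endif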
12780
12781
12782/**
12783 * IOMMMIORead notification.
12784 */
12785VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12786{
12787 PVMCPU pVCpu = VMMGetCpu(pVM);
12788 if (!pVCpu)
12789 return;
12790 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12791 if (!pEvtRec)
12792 return;
12793 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12794 pEvtRec->u.RamRead.GCPhys = GCPhys;
12795 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12796 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12797 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12798}
12799
12800
12801/**
12802 * IOMMMIOWrite notification.
12803 */
12804VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12805{
12806 PVMCPU pVCpu = VMMGetCpu(pVM);
12807 if (!pVCpu)
12808 return;
12809 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12810 if (!pEvtRec)
12811 return;
12812 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12813 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12814 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12815 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12816 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12817 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12818 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12819 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12820 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12821}
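
/*
 * Editor's note: a minimal sketch (not built, kept inside #if 0) of what the
 * RT_BYTE1..RT_BYTE4 macros used above boil down to: picking out the
 * little-endian bytes of the 32-bit value being written.  The helper name is
 * hypothetical.
 */
#if 0
static void iemSketchSplitU32(uint32_t u32Value, uint8_t ab[4])
{
    ab[0] = (uint8_t)(u32Value      );  /* RT_BYTE1: bits 0..7   */
    ab[1] = (uint8_t)(u32Value >>  8);  /* RT_BYTE2: bits 8..15  */
    ab[2] = (uint8_t)(u32Value >> 16);  /* RT_BYTE3: bits 16..23 */
    ab[3] = (uint8_t)(u32Value >> 24);  /* RT_BYTE4: bits 24..31 */
}
#endif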
12822
12823
12824/**
12825 * IOMIOPortRead notification.
12826 */
12827VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12828{
12829 PVMCPU pVCpu = VMMGetCpu(pVM);
12830 if (!pVCpu)
12831 return;
12832 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12833 if (!pEvtRec)
12834 return;
12835 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12836 pEvtRec->u.IOPortRead.Port = Port;
12837 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12838 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12839 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12840}
12841
12842/**
12843 * IOMIOPortWrite notification.
12844 */
12845VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12846{
12847 PVMCPU pVCpu = VMMGetCpu(pVM);
12848 if (!pVCpu)
12849 return;
12850 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12851 if (!pEvtRec)
12852 return;
12853 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12854 pEvtRec->u.IOPortWrite.Port = Port;
12855 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12856 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12857 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12858 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12859}
12860
12861
12862VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12863{
12864 PVMCPU pVCpu = VMMGetCpu(pVM);
12865 if (!pVCpu)
12866 return;
12867 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12868 if (!pEvtRec)
12869 return;
12870 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12871 pEvtRec->u.IOPortStrRead.Port = Port;
12872 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12873 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12874 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12875 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12876}
12877
12878
12879VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12880{
12881 PVMCPU pVCpu = VMMGetCpu(pVM);
12882 if (!pVCpu)
12883 return;
12884 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12885 if (!pEvtRec)
12886 return;
12887 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12888 pEvtRec->u.IOPortStrWrite.Port = Port;
12889 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12890 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12891 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12892 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12893}
12894
12895
12896/**
12897 * Fakes and records an I/O port read.
12898 *
12899 * @returns VINF_SUCCESS.
12900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12901 * @param Port The I/O port.
12902 * @param pu32Value Where to store the fake value.
12903 * @param cbValue The size of the access.
12904 */
12905IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12906{
12907 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12908 if (pEvtRec)
12909 {
12910 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12911 pEvtRec->u.IOPortRead.Port = Port;
12912 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12913 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12914 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12915 }
12916 pVCpu->iem.s.cIOReads++;
12917 *pu32Value = 0xcccccccc;
12918 return VINF_SUCCESS;
12919}
12920
12921
12922/**
12923 * Fakes and records an I/O port write.
12924 *
12925 * @returns VINF_SUCCESS.
12926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12927 * @param Port The I/O port.
12928 * @param u32Value The value being written.
12929 * @param cbValue The size of the access.
12930 */
12931IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12932{
12933 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12934 if (pEvtRec)
12935 {
12936 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12937 pEvtRec->u.IOPortWrite.Port = Port;
12938 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12939 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12940 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12941 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12942 }
12943 pVCpu->iem.s.cIOWrites++;
12944 return VINF_SUCCESS;
12945}
12946
12947
12948/**
12949 * Used to add extra details about a stub case.
12950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12951 */
12952IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12953{
12954 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12955 PVM pVM = pVCpu->CTX_SUFF(pVM);
12956
12957 char szRegs[4096];
12958 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12959 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12960 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12961 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12962 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12963 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12964 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12965 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12966 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12967 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12968 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12969 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12970 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12971 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12972 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12973 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12974 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12975 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12976 " efer=%016VR{efer}\n"
12977 " pat=%016VR{pat}\n"
12978 " sf_mask=%016VR{sf_mask}\n"
12979 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12980 " lstar=%016VR{lstar}\n"
12981 " star=%016VR{star} cstar=%016VR{cstar}\n"
12982 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12983 );
12984
12985 char szInstr1[256];
12986 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12987 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12988 szInstr1, sizeof(szInstr1), NULL);
12989 char szInstr2[256];
12990 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12991 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12992 szInstr2, sizeof(szInstr2), NULL);
12993
12994 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12995}
12996
12997
12998/**
12999 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13000 * dump to the assertion info.
13001 *
13002 * @param pEvtRec The record to dump.
13003 */
13004IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13005{
13006 switch (pEvtRec->enmEvent)
13007 {
13008 case IEMVERIFYEVENT_IOPORT_READ:
13009 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13010 pEvtRec->u.IOPortRead.Port,
13011 pEvtRec->u.IOPortRead.cbValue);
13012 break;
13013 case IEMVERIFYEVENT_IOPORT_WRITE:
13014 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13015 pEvtRec->u.IOPortWrite.Port,
13016 pEvtRec->u.IOPortWrite.cbValue,
13017 pEvtRec->u.IOPortWrite.u32Value);
13018 break;
13019 case IEMVERIFYEVENT_IOPORT_STR_READ:
13020 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13021 pEvtRec->u.IOPortStrRead.Port,
13022 pEvtRec->u.IOPortStrRead.cbValue,
13023 pEvtRec->u.IOPortStrRead.cTransfers);
13024 break;
13025 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13026 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13027 pEvtRec->u.IOPortStrWrite.Port,
13028 pEvtRec->u.IOPortStrWrite.cbValue,
13029 pEvtRec->u.IOPortStrWrite.cTransfers);
13030 break;
13031 case IEMVERIFYEVENT_RAM_READ:
13032 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13033 pEvtRec->u.RamRead.GCPhys,
13034 pEvtRec->u.RamRead.cb);
13035 break;
13036 case IEMVERIFYEVENT_RAM_WRITE:
13037 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13038 pEvtRec->u.RamWrite.GCPhys,
13039 pEvtRec->u.RamWrite.cb,
13040 (int)pEvtRec->u.RamWrite.cb,
13041 pEvtRec->u.RamWrite.ab);
13042 break;
13043 default:
13044 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13045 break;
13046 }
13047}
13048
13049
13050/**
13051 * Raises an assertion on the specified records, showing the given message with
13052 * a record dump attached.
13053 *
13054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13055 * @param pEvtRec1 The first record.
13056 * @param pEvtRec2 The second record.
13057 * @param pszMsg The message explaining why we're asserting.
13058 */
13059IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13060{
13061 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13062 iemVerifyAssertAddRecordDump(pEvtRec1);
13063 iemVerifyAssertAddRecordDump(pEvtRec2);
13064 iemVerifyAssertMsg2(pVCpu);
13065 RTAssertPanic();
13066}
13067
13068
13069/**
13070 * Raises an assertion on the specified record, showing the given message with
13071 * a record dump attached.
13072 *
13073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13074 * @param pEvtRec The record.
13075 * @param pszMsg The message explaining why we're asserting.
13076 */
13077IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13078{
13079 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13080 iemVerifyAssertAddRecordDump(pEvtRec);
13081 iemVerifyAssertMsg2(pVCpu);
13082 RTAssertPanic();
13083}
13084
13085
13086/**
13087 * Verifies a write record.
13088 *
13089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13090 * @param pEvtRec The write record.
13091 * @param fRem Set if REM was doing the other execution. If clear
13092 * it was HM.
13093 */
13094IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13095{
13096 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13097 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13098 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13099 if ( RT_FAILURE(rc)
13100 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13101 {
13102 /* fend off ins */
13103 if ( !pVCpu->iem.s.cIOReads
13104 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13105 || ( pEvtRec->u.RamWrite.cb != 1
13106 && pEvtRec->u.RamWrite.cb != 2
13107 && pEvtRec->u.RamWrite.cb != 4) )
13108 {
13109 /* fend off ROMs and MMIO */
13110 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13111 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13112 {
13113 /* fend off fxsave */
13114 if (pEvtRec->u.RamWrite.cb != 512)
13115 {
13116 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13117 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13118 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13119 RTAssertMsg2Add("%s: %.*Rhxs\n"
13120 "iem: %.*Rhxs\n",
13121 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13122 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13123 iemVerifyAssertAddRecordDump(pEvtRec);
13124 iemVerifyAssertMsg2(pVCpu);
13125 RTAssertPanic();
13126 }
13127 }
13128 }
13129 }
13130
13131}
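
/*
 * Editor's note: a minimal sketch (not built, kept inside #if 0) of the
 * unsigned range trick used by the ROM/MMIO fend-off above: (x - lo) > len is
 * a single compare that is true exactly when x lies outside [lo, lo + len],
 * because values below lo wrap around to huge unsigned numbers.  The helper
 * name is hypothetical.
 */
#if 0
static bool iemSketchIsOutsideRange(uint64_t uAddr, uint64_t uStart, uint64_t cbRange)
{
    return uAddr - uStart > cbRange;
}
/* E.g. iemSketchIsOutsideRange(GCPhys, UINT32_C(0x000a0000), UINT32_C(0x60000))
   mirrors the first of the two fend-off checks above. */
#endif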
13132
13133/**
13134 * Performs the post-execution verification checks.
13135 */
13136IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13137{
13138 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13139 return rcStrictIem;
13140
13141 /*
13142 * Switch back the state.
13143 */
13144 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13145 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13146 Assert(pOrgCtx != pDebugCtx);
13147 IEM_GET_CTX(pVCpu) = pOrgCtx;
13148
13149 /*
13150 * Execute the instruction in REM.
13151 */
13152 bool fRem = false;
13153 PVM pVM = pVCpu->CTX_SUFF(pVM);
13154
13155 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13156#ifdef IEM_VERIFICATION_MODE_FULL_HM
13157 if ( HMIsEnabled(pVM)
13158 && pVCpu->iem.s.cIOReads == 0
13159 && pVCpu->iem.s.cIOWrites == 0
13160 && !pVCpu->iem.s.fProblematicMemory)
13161 {
13162 uint64_t uStartRip = pOrgCtx->rip;
13163 unsigned iLoops = 0;
13164 do
13165 {
13166 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13167 iLoops++;
13168 } while ( rc == VINF_SUCCESS
13169 || ( rc == VINF_EM_DBG_STEPPED
13170 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13171 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13172 || ( pOrgCtx->rip != pDebugCtx->rip
13173 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13174 && iLoops < 8) );
13175 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13176 rc = VINF_SUCCESS;
13177 }
13178#endif
13179 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13180 || rc == VINF_IOM_R3_IOPORT_READ
13181 || rc == VINF_IOM_R3_IOPORT_WRITE
13182 || rc == VINF_IOM_R3_MMIO_READ
13183 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13184 || rc == VINF_IOM_R3_MMIO_WRITE
13185 || rc == VINF_CPUM_R3_MSR_READ
13186 || rc == VINF_CPUM_R3_MSR_WRITE
13187 || rc == VINF_EM_RESCHEDULE
13188 )
13189 {
13190 EMRemLock(pVM);
13191 rc = REMR3EmulateInstruction(pVM, pVCpu);
13192 AssertRC(rc);
13193 EMRemUnlock(pVM);
13194 fRem = true;
13195 }
13196
13197# if 1 /* Skip unimplemented instructions for now. */
13198 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13199 {
13200 IEM_GET_CTX(pVCpu) = pOrgCtx;
13201 if (rc == VINF_EM_DBG_STEPPED)
13202 return VINF_SUCCESS;
13203 return rc;
13204 }
13205# endif
13206
13207 /*
13208 * Compare the register states.
13209 */
13210 unsigned cDiffs = 0;
13211 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13212 {
13213 //Log(("REM and IEM ends up with different registers!\n"));
13214 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13215
13216# define CHECK_FIELD(a_Field) \
13217 do \
13218 { \
13219 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13220 { \
13221 switch (sizeof(pOrgCtx->a_Field)) \
13222 { \
13223 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13224 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13225 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13226 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13227 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13228 } \
13229 cDiffs++; \
13230 } \
13231 } while (0)
13232# define CHECK_XSTATE_FIELD(a_Field) \
13233 do \
13234 { \
13235 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13236 { \
13237 switch (sizeof(pOrgXState->a_Field)) \
13238 { \
13239 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13240 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13241 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13242 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13243 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13244 } \
13245 cDiffs++; \
13246 } \
13247 } while (0)
13248
13249# define CHECK_BIT_FIELD(a_Field) \
13250 do \
13251 { \
13252 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13253 { \
13254 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13255 cDiffs++; \
13256 } \
13257 } while (0)
13258
13259# define CHECK_SEL(a_Sel) \
13260 do \
13261 { \
13262 CHECK_FIELD(a_Sel.Sel); \
13263 CHECK_FIELD(a_Sel.Attr.u); \
13264 CHECK_FIELD(a_Sel.u64Base); \
13265 CHECK_FIELD(a_Sel.u32Limit); \
13266 CHECK_FIELD(a_Sel.fFlags); \
13267 } while (0)
13268
13269 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13270 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13271
13272#if 1 /* The recompiler doesn't update these the intel way. */
13273 if (fRem)
13274 {
13275 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13276 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13277 pOrgXState->x87.CS = pDebugXState->x87.CS;
13278 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13279 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13280 pOrgXState->x87.DS = pDebugXState->x87.DS;
13281 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13282 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13283 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13284 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13285 }
13286#endif
13287 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13288 {
13289 RTAssertMsg2Weak(" the FPU state differs\n");
13290 cDiffs++;
13291 CHECK_XSTATE_FIELD(x87.FCW);
13292 CHECK_XSTATE_FIELD(x87.FSW);
13293 CHECK_XSTATE_FIELD(x87.FTW);
13294 CHECK_XSTATE_FIELD(x87.FOP);
13295 CHECK_XSTATE_FIELD(x87.FPUIP);
13296 CHECK_XSTATE_FIELD(x87.CS);
13297 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13298 CHECK_XSTATE_FIELD(x87.FPUDP);
13299 CHECK_XSTATE_FIELD(x87.DS);
13300 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13301 CHECK_XSTATE_FIELD(x87.MXCSR);
13302 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13303 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13304 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13305 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13306 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13307 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13308 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13309 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13310 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13311 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13312 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13313 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13314 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13315 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13316 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13317 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13318 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13319 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13320 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13321 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13322 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13323 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13324 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13325 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13326 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13327 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13328 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13329 }
13330 CHECK_FIELD(rip);
13331 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13332 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13333 {
13334 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13335 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13336 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13337 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13338 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13339 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13340 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13341 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13342 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13343 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13344 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13345 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13346 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13347 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13348 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13349 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13350 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13351 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13352 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13353 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13354 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13355 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13356 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13357 }
13358
13359 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13360 CHECK_FIELD(rax);
13361 CHECK_FIELD(rcx);
13362 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13363 CHECK_FIELD(rdx);
13364 CHECK_FIELD(rbx);
13365 CHECK_FIELD(rsp);
13366 CHECK_FIELD(rbp);
13367 CHECK_FIELD(rsi);
13368 CHECK_FIELD(rdi);
13369 CHECK_FIELD(r8);
13370 CHECK_FIELD(r9);
13371 CHECK_FIELD(r10);
13372 CHECK_FIELD(r11);
13373 CHECK_FIELD(r12);
13374 CHECK_FIELD(r13);
13375 CHECK_SEL(cs);
13376 CHECK_SEL(ss);
13377 CHECK_SEL(ds);
13378 CHECK_SEL(es);
13379 CHECK_SEL(fs);
13380 CHECK_SEL(gs);
13381 CHECK_FIELD(cr0);
13382
13383 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13384 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13385 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
13386 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13387 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13388 {
13389 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13390 { /* ignore */ }
13391 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13392 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13393 && fRem)
13394 { /* ignore */ }
13395 else
13396 CHECK_FIELD(cr2);
13397 }
13398 CHECK_FIELD(cr3);
13399 CHECK_FIELD(cr4);
13400 CHECK_FIELD(dr[0]);
13401 CHECK_FIELD(dr[1]);
13402 CHECK_FIELD(dr[2]);
13403 CHECK_FIELD(dr[3]);
13404 CHECK_FIELD(dr[6]);
13405 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13406 CHECK_FIELD(dr[7]);
13407 CHECK_FIELD(gdtr.cbGdt);
13408 CHECK_FIELD(gdtr.pGdt);
13409 CHECK_FIELD(idtr.cbIdt);
13410 CHECK_FIELD(idtr.pIdt);
13411 CHECK_SEL(ldtr);
13412 CHECK_SEL(tr);
13413 CHECK_FIELD(SysEnter.cs);
13414 CHECK_FIELD(SysEnter.eip);
13415 CHECK_FIELD(SysEnter.esp);
13416 CHECK_FIELD(msrEFER);
13417 CHECK_FIELD(msrSTAR);
13418 CHECK_FIELD(msrPAT);
13419 CHECK_FIELD(msrLSTAR);
13420 CHECK_FIELD(msrCSTAR);
13421 CHECK_FIELD(msrSFMASK);
13422 CHECK_FIELD(msrKERNELGSBASE);
13423
13424 if (cDiffs != 0)
13425 {
13426 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13427 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13428 RTAssertPanic();
13429 static bool volatile s_fEnterDebugger = true;
13430 if (s_fEnterDebugger)
13431 DBGFSTOP(pVM);
13432
13433# if 1 /* Ignore unimplemented instructions for now. */
13434 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13435 rcStrictIem = VINF_SUCCESS;
13436# endif
13437 }
13438# undef CHECK_FIELD
13439# undef CHECK_BIT_FIELD
13440 }
13441
13442 /*
13443 * If the register state compared fine, check the verification event
13444 * records.
13445 */
13446 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13447 {
13448 /*
13449 * Compare verification event records.
13450 * - I/O port accesses should be a 1:1 match.
13451 */
13452 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13453 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13454 while (pIemRec && pOtherRec)
13455 {
13456 /* Since we might miss RAM writes and reads, ignore reads and check
13457 that any extra IEM write records match what is actually in guest memory. */
13458 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13459 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13460 && pIemRec->pNext)
13461 {
13462 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13463 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13464 pIemRec = pIemRec->pNext;
13465 }
13466
13467 /* Do the compare. */
13468 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13469 {
13470 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13471 break;
13472 }
13473 bool fEquals;
13474 switch (pIemRec->enmEvent)
13475 {
13476 case IEMVERIFYEVENT_IOPORT_READ:
13477 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13478 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13479 break;
13480 case IEMVERIFYEVENT_IOPORT_WRITE:
13481 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13482 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13483 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13484 break;
13485 case IEMVERIFYEVENT_IOPORT_STR_READ:
13486 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13487 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13488 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13489 break;
13490 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13491 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13492 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13493 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13494 break;
13495 case IEMVERIFYEVENT_RAM_READ:
13496 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13497 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13498 break;
13499 case IEMVERIFYEVENT_RAM_WRITE:
13500 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13501 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13502 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13503 break;
13504 default:
13505 fEquals = false;
13506 break;
13507 }
13508 if (!fEquals)
13509 {
13510 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13511 break;
13512 }
13513
13514 /* advance */
13515 pIemRec = pIemRec->pNext;
13516 pOtherRec = pOtherRec->pNext;
13517 }
13518
13519 /* Ignore extra writes and reads. */
13520 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13521 {
13522 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13523 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13524 pIemRec = pIemRec->pNext;
13525 }
13526 if (pIemRec != NULL)
13527 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13528 else if (pOtherRec != NULL)
13529 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13530 }
13531 IEM_GET_CTX(pVCpu) = pOrgCtx;
13532
13533 return rcStrictIem;
13534}
13535
13536#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13537
13538/* stubs */
13539IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13540{
13541 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13542 return VERR_INTERNAL_ERROR;
13543}
13544
13545IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13546{
13547 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13548 return VERR_INTERNAL_ERROR;
13549}
13550
13551#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13552
13553
13554#ifdef LOG_ENABLED
13555/**
13556 * Logs the current instruction.
13557 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13558 * @param pCtx The current CPU context.
13559 * @param fSameCtx Set if we have the same context information as the VMM,
13560 * clear if we may have already executed an instruction in
13561 * our debug context. When clear, we assume IEMCPU holds
13562 * valid CPU mode info.
13563 */
13564IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13565{
13566# ifdef IN_RING3
13567 if (LogIs2Enabled())
13568 {
13569 char szInstr[256];
13570 uint32_t cbInstr = 0;
13571 if (fSameCtx)
13572 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13573 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13574 szInstr, sizeof(szInstr), &cbInstr);
13575 else
13576 {
13577 uint32_t fFlags = 0;
13578 switch (pVCpu->iem.s.enmCpuMode)
13579 {
13580 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13581 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13582 case IEMMODE_16BIT:
13583 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13584 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13585 else
13586 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13587 break;
13588 }
13589 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13590 szInstr, sizeof(szInstr), &cbInstr);
13591 }
13592
13593 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13594 Log2(("****\n"
13595 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13596 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13597 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13598 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13599 " %s\n"
13600 ,
13601 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13602 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13603 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13604 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13605 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13606 szInstr));
13607
13608 if (LogIs3Enabled())
13609 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13610 }
13611 else
13612# endif
13613 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13614 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13615 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13616}
13617#endif
13618
13619
13620/**
13621 * Makes status code adjustments (pass up from I/O and access handlers)
13622 * as well as maintaining statistics.
13623 *
13624 * @returns Strict VBox status code to pass up.
13625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13626 * @param rcStrict The status from executing an instruction.
13627 */
13628DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13629{
13630 if (rcStrict != VINF_SUCCESS)
13631 {
13632 if (RT_SUCCESS(rcStrict))
13633 {
13634 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13635 || rcStrict == VINF_IOM_R3_IOPORT_READ
13636 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13637 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13638 || rcStrict == VINF_IOM_R3_MMIO_READ
13639 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13640 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13641 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13642 || rcStrict == VINF_CPUM_R3_MSR_READ
13643 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13644 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13645 || rcStrict == VINF_EM_RAW_TO_R3
13646 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13647 /* raw-mode / virt handlers only: */
13648 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13649 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13650 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13651 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13652 || rcStrict == VINF_SELM_SYNC_GDT
13653 || rcStrict == VINF_CSAM_PENDING_ACTION
13654 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13655 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13656/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
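    /* The pass-up status takes precedence when it isn't an EM scheduling
       status, or when it is one of higher priority (lower value) than
       rcStrict; otherwise the informational rcStrict is kept. */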
13657 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13658 if (rcPassUp == VINF_SUCCESS)
13659 pVCpu->iem.s.cRetInfStatuses++;
13660 else if ( rcPassUp < VINF_EM_FIRST
13661 || rcPassUp > VINF_EM_LAST
13662 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13663 {
13664 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13665 pVCpu->iem.s.cRetPassUpStatus++;
13666 rcStrict = rcPassUp;
13667 }
13668 else
13669 {
13670 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13671 pVCpu->iem.s.cRetInfStatuses++;
13672 }
13673 }
13674 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13675 pVCpu->iem.s.cRetAspectNotImplemented++;
13676 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13677 pVCpu->iem.s.cRetInstrNotImplemented++;
13678#ifdef IEM_VERIFICATION_MODE_FULL
13679 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13680 rcStrict = VINF_SUCCESS;
13681#endif
13682 else
13683 pVCpu->iem.s.cRetErrStatuses++;
13684 }
13685 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13686 {
13687 pVCpu->iem.s.cRetPassUpStatus++;
13688 rcStrict = pVCpu->iem.s.rcPassUp;
13689 }
13690
13691 return rcStrict;
13692}
13693
13694
13695/**
13696 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13697 * IEMExecOneWithPrefetchedByPC.
13698 *
13699 * Similar code is found in IEMExecLots.
13700 *
13701 * @return Strict VBox status code.
13702 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13704 * @param fExecuteInhibit If set, execute the instruction following CLI,
13705 * POP SS and MOV SS,GR.
13706 */
13707DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13708{
13709#ifdef IEM_WITH_SETJMP
13710 VBOXSTRICTRC rcStrict;
13711 jmp_buf JmpBuf;
13712 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13713 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13714 if ((rcStrict = setjmp(JmpBuf)) == 0)
13715 {
13716 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13717 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13718 }
13719 else
13720 pVCpu->iem.s.cLongJumps++;
13721 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13722#else
13723 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13724 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13725#endif
13726 if (rcStrict == VINF_SUCCESS)
13727 pVCpu->iem.s.cInstructions++;
13728 if (pVCpu->iem.s.cActiveMappings > 0)
13729 {
13730 Assert(rcStrict != VINF_SUCCESS);
13731 iemMemRollback(pVCpu);
13732 }
13733//#ifdef DEBUG
13734// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13735//#endif
13736
13737 /* Execute the next instruction as well if a cli, pop ss or
13738 mov ss, Gr has just completed successfully. */
13739 if ( fExecuteInhibit
13740 && rcStrict == VINF_SUCCESS
13741 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13742 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13743 {
13744 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13745 if (rcStrict == VINF_SUCCESS)
13746 {
13747#ifdef LOG_ENABLED
13748 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13749#endif
13750#ifdef IEM_WITH_SETJMP
13751 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13752 if ((rcStrict = setjmp(JmpBuf)) == 0)
13753 {
13754 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13755 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13756 }
13757 else
13758 pVCpu->iem.s.cLongJumps++;
13759 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13760#else
13761 IEM_OPCODE_GET_NEXT_U8(&b);
13762 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13763#endif
13764 if (rcStrict == VINF_SUCCESS)
13765 pVCpu->iem.s.cInstructions++;
13766 if (pVCpu->iem.s.cActiveMappings > 0)
13767 {
13768 Assert(rcStrict != VINF_SUCCESS);
13769 iemMemRollback(pVCpu);
13770 }
13771 }
13772 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13773 }
13774
13775 /*
13776 * Return value fiddling, statistics and sanity assertions.
13777 */
13778 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13779
13780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13781 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13782#if defined(IEM_VERIFICATION_MODE_FULL)
13783 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13784 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13785 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13786 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13787#endif
13788 return rcStrict;
13789}
13790
13791
13792#ifdef IN_RC
13793/**
13794 * Re-enters raw-mode or ensures we return to ring-3.
13795 *
13796 * @returns rcStrict, maybe modified.
13797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13798 * @param pCtx The current CPU context.
13799 * @param rcStrict The status code returned by the interpreter.
13800 */
13801DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13802{
13803 if ( !pVCpu->iem.s.fInPatchCode
13804 && ( rcStrict == VINF_SUCCESS
13805 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13806 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13807 {
13808 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13809 CPUMRawEnter(pVCpu);
13810 else
13811 {
13812 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13813 rcStrict = VINF_EM_RESCHEDULE;
13814 }
13815 }
13816 return rcStrict;
13817}
13818#endif
13819
13820
13821/**
13822 * Execute one instruction.
13823 *
13824 * @return Strict VBox status code.
13825 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13826 */
13827VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13828{
13829#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13830 if (++pVCpu->iem.s.cVerifyDepth == 1)
13831 iemExecVerificationModeSetup(pVCpu);
13832#endif
13833#ifdef LOG_ENABLED
13834 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13835 iemLogCurInstr(pVCpu, pCtx, true);
13836#endif
13837
13838 /*
13839 * Do the decoding and emulation.
13840 */
13841 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13842 if (rcStrict == VINF_SUCCESS)
13843 rcStrict = iemExecOneInner(pVCpu, true);
13844
13845#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13846 /*
13847 * Assert some sanity.
13848 */
13849 if (pVCpu->iem.s.cVerifyDepth == 1)
13850 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13851 pVCpu->iem.s.cVerifyDepth--;
13852#endif
13853#ifdef IN_RC
13854 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13855#endif
13856 if (rcStrict != VINF_SUCCESS)
13857 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13858 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13859 return rcStrict;
13860}
13861
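/*
 * Illustrative only: a minimal sketch of how a caller might fall back to
 * IEMExecOne for a single instruction.  The wrapper name and the handling of
 * VERR_IEM_ASPECT_NOT_IMPLEMENTED below are assumptions for the example, not
 * part of this file.
 */
#if 0
static VBOXSTRICTRC someExecOneFallback(PVMCPU pVCpu)
{
    /* Decode and emulate exactly one guest instruction. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    /* A hypothetical caller could translate "aspect not implemented" into a
       request to emulate the instruction by other means. */
    if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    return rcStrict;
}
#endif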
13862
13863VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13864{
13865 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13866 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13867
13868 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13869 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13870 if (rcStrict == VINF_SUCCESS)
13871 {
13872 rcStrict = iemExecOneInner(pVCpu, true);
13873 if (pcbWritten)
13874 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13875 }
13876
13877#ifdef IN_RC
13878 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13879#endif
13880 return rcStrict;
13881}
13882
13883
13884VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13885 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13886{
13887 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13888 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13889
13890 VBOXSTRICTRC rcStrict;
13891 if ( cbOpcodeBytes
13892 && pCtx->rip == OpcodeBytesPC)
13893 {
13894 iemInitDecoder(pVCpu, false);
13895#ifdef IEM_WITH_CODE_TLB
13896 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13897 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13898 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13899 pVCpu->iem.s.offCurInstrStart = 0;
13900 pVCpu->iem.s.offInstrNextByte = 0;
13901#else
13902 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13903 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13904#endif
13905 rcStrict = VINF_SUCCESS;
13906 }
13907 else
13908 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13909 if (rcStrict == VINF_SUCCESS)
13910 {
13911 rcStrict = iemExecOneInner(pVCpu, true);
13912 }
13913
13914#ifdef IN_RC
13915 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13916#endif
13917 return rcStrict;
13918}
13919
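/*
 * Illustrative only: a hedged sketch of using the prefetched variant when the
 * caller already holds the opcode bytes for the current RIP.  The abOpcode and
 * cbOpcode variables are assumptions standing in for whatever the caller has.
 */
#if 0
    PCPUMCTX     pCtx     = IEM_GET_CTX(pVCpu);
    VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
                                                         &abOpcode[0], cbOpcode);
#endif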
13920
13921VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13922{
13923 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13924 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13925
13926 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13927 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13928 if (rcStrict == VINF_SUCCESS)
13929 {
13930 rcStrict = iemExecOneInner(pVCpu, false);
13931 if (pcbWritten)
13932 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13933 }
13934
13935#ifdef IN_RC
13936 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13937#endif
13938 return rcStrict;
13939}
13940
13941
13942VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13943 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13944{
13945 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13946 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13947
13948 VBOXSTRICTRC rcStrict;
13949 if ( cbOpcodeBytes
13950 && pCtx->rip == OpcodeBytesPC)
13951 {
13952 iemInitDecoder(pVCpu, true);
13953#ifdef IEM_WITH_CODE_TLB
13954 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13955 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13956 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13957 pVCpu->iem.s.offCurInstrStart = 0;
13958 pVCpu->iem.s.offInstrNextByte = 0;
13959#else
13960 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13961 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13962#endif
13963 rcStrict = VINF_SUCCESS;
13964 }
13965 else
13966 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13967 if (rcStrict == VINF_SUCCESS)
13968 rcStrict = iemExecOneInner(pVCpu, false);
13969
13970#ifdef IN_RC
13971 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13972#endif
13973 return rcStrict;
13974}
13975
13976
13977/**
13978 * For debugging DISGetParamSize; may come in handy.
13979 *
13980 * @returns Strict VBox status code.
13981 * @param pVCpu The cross context virtual CPU structure of the
13982 * calling EMT.
13983 * @param pCtxCore The context core structure.
13984 * @param OpcodeBytesPC The PC of the opcode bytes.
13985 * @param pvOpcodeBytes Prefetched opcode bytes.
13986 * @param cbOpcodeBytes Number of prefetched bytes.
13987 * @param pcbWritten Where to return the number of bytes written.
13988 * Optional.
13989 */
13990VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13991 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13992 uint32_t *pcbWritten)
13993{
13994 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13995 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13996
13997 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13998 VBOXSTRICTRC rcStrict;
13999 if ( cbOpcodeBytes
14000 && pCtx->rip == OpcodeBytesPC)
14001 {
14002 iemInitDecoder(pVCpu, true);
14003#ifdef IEM_WITH_CODE_TLB
14004 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14005 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14006 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14007 pVCpu->iem.s.offCurInstrStart = 0;
14008 pVCpu->iem.s.offInstrNextByte = 0;
14009#else
14010 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14011 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14012#endif
14013 rcStrict = VINF_SUCCESS;
14014 }
14015 else
14016 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14017 if (rcStrict == VINF_SUCCESS)
14018 {
14019 rcStrict = iemExecOneInner(pVCpu, false);
14020 if (pcbWritten)
14021 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14022 }
14023
14024#ifdef IN_RC
14025 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14026#endif
14027 return rcStrict;
14028}
14029
14030
14031VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14032{
14033 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14034
14035#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14036 /*
14037 * See if there is an interrupt pending in TRPM, inject it if we can.
14038 */
14039 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14040# ifdef IEM_VERIFICATION_MODE_FULL
14041 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14042# endif
14043 if ( pCtx->eflags.Bits.u1IF
14044 && TRPMHasTrap(pVCpu)
14045 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14046 {
14047 uint8_t u8TrapNo;
14048 TRPMEVENT enmType;
14049 RTGCUINT uErrCode;
14050 RTGCPTR uCr2;
14051 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14052 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14053 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14054 TRPMResetTrap(pVCpu);
14055 }
14056
14057 /*
14058 * Log the state.
14059 */
14060# ifdef LOG_ENABLED
14061 iemLogCurInstr(pVCpu, pCtx, true);
14062# endif
14063
14064 /*
14065 * Do the decoding and emulation.
14066 */
14067 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14068 if (rcStrict == VINF_SUCCESS)
14069 rcStrict = iemExecOneInner(pVCpu, true);
14070
14071 /*
14072 * Assert some sanity.
14073 */
14074 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14075
14076 /*
14077 * Log and return.
14078 */
14079 if (rcStrict != VINF_SUCCESS)
14080 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14081 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14082 if (pcInstructions)
14083 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14084 return rcStrict;
14085
14086#else /* Not verification mode */
14087
14088 /*
14089 * See if there is an interrupt pending in TRPM, inject it if we can.
14090 */
14091 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14092# ifdef IEM_VERIFICATION_MODE_FULL
14093 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14094# endif
14095 if ( pCtx->eflags.Bits.u1IF
14096 && TRPMHasTrap(pVCpu)
14097 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14098 {
14099 uint8_t u8TrapNo;
14100 TRPMEVENT enmType;
14101 RTGCUINT uErrCode;
14102 RTGCPTR uCr2;
14103 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14104 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14105 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14106 TRPMResetTrap(pVCpu);
14107 }
14108
14109 /*
14110 * Initial decoder init w/ prefetch, then setup setjmp.
14111 */
14112 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14113 if (rcStrict == VINF_SUCCESS)
14114 {
14115# ifdef IEM_WITH_SETJMP
14116 jmp_buf JmpBuf;
14117 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14118 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14119 pVCpu->iem.s.cActiveMappings = 0;
14120 if ((rcStrict = setjmp(JmpBuf)) == 0)
14121# endif
14122 {
14123 /*
14124 * The run loop. We limit ourselves to 4096 instructions right now.
14125 */
14126 PVM pVM = pVCpu->CTX_SUFF(pVM);
14127 uint32_t cInstr = 4096;
14128 for (;;)
14129 {
14130 /*
14131 * Log the state.
14132 */
14133# ifdef LOG_ENABLED
14134 iemLogCurInstr(pVCpu, pCtx, true);
14135# endif
14136
14137 /*
14138 * Do the decoding and emulation.
14139 */
14140 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14141 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14142 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14143 {
14144 Assert(pVCpu->iem.s.cActiveMappings == 0);
14145 pVCpu->iem.s.cInstructions++;
14146 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14147 {
14148 uint32_t fCpu = pVCpu->fLocalForcedActions
14149 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14150 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14151 | VMCPU_FF_TLB_FLUSH
14152# ifdef VBOX_WITH_RAW_MODE
14153 | VMCPU_FF_TRPM_SYNC_IDT
14154 | VMCPU_FF_SELM_SYNC_TSS
14155 | VMCPU_FF_SELM_SYNC_GDT
14156 | VMCPU_FF_SELM_SYNC_LDT
14157# endif
14158 | VMCPU_FF_INHIBIT_INTERRUPTS
14159 | VMCPU_FF_BLOCK_NMIS ));
14160
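                    /* Keep iterating only while no relevant VCPU force-flags are
                       pending (pending PIC/APIC interrupts are tolerated as long
                       as IF is clear) and no VM-wide force-flags need servicing. */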
14161 if (RT_LIKELY( ( !fCpu
14162 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14163 && !pCtx->rflags.Bits.u1IF) )
14164 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14165 {
14166 if (cInstr-- > 0)
14167 {
14168 Assert(pVCpu->iem.s.cActiveMappings == 0);
14169 iemReInitDecoder(pVCpu);
14170 continue;
14171 }
14172 }
14173 }
14174 Assert(pVCpu->iem.s.cActiveMappings == 0);
14175 }
14176 else if (pVCpu->iem.s.cActiveMappings > 0)
14177 iemMemRollback(pVCpu);
14178 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14179 break;
14180 }
14181 }
14182# ifdef IEM_WITH_SETJMP
14183 else
14184 {
14185 if (pVCpu->iem.s.cActiveMappings > 0)
14186 iemMemRollback(pVCpu);
14187 pVCpu->iem.s.cLongJumps++;
14188 }
14189 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14190# endif
14191
14192 /*
14193 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14194 */
14195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14196 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14197# if defined(IEM_VERIFICATION_MODE_FULL)
14198 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14199 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14200 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14202# endif
14203 }
14204
14205 /*
14206 * Maybe re-enter raw-mode and log.
14207 */
14208# ifdef IN_RC
14209 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14210# endif
14211 if (rcStrict != VINF_SUCCESS)
14212 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14213 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14214 if (pcInstructions)
14215 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14216 return rcStrict;
14217#endif /* Not verification mode */
14218}
14219
14220
14221
14222/**
14223 * Injects a trap, fault, abort, software interrupt or external interrupt.
14224 *
14225 * The parameter list matches TRPMQueryTrapAll pretty closely.
14226 *
14227 * @returns Strict VBox status code.
14228 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14229 * @param u8TrapNo The trap number.
14230 * @param enmType What type is it (trap/fault/abort), software
14231 * interrupt or hardware interrupt.
14232 * @param uErrCode The error code if applicable.
14233 * @param uCr2 The CR2 value if applicable.
14234 * @param cbInstr The instruction length (only relevant for
14235 * software interrupts).
14236 */
14237VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14238 uint8_t cbInstr)
14239{
14240 iemInitDecoder(pVCpu, false);
14241#ifdef DBGFTRACE_ENABLED
14242 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14243 u8TrapNo, enmType, uErrCode, uCr2);
14244#endif
14245
14246 uint32_t fFlags;
14247 switch (enmType)
14248 {
14249 case TRPM_HARDWARE_INT:
14250 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14251 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14252 uErrCode = uCr2 = 0;
14253 break;
14254
14255 case TRPM_SOFTWARE_INT:
14256 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14257 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14258 uErrCode = uCr2 = 0;
14259 break;
14260
14261 case TRPM_TRAP:
14262 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14263 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14264 if (u8TrapNo == X86_XCPT_PF)
14265 fFlags |= IEM_XCPT_FLAGS_CR2;
14266 switch (u8TrapNo)
14267 {
14268 case X86_XCPT_DF:
14269 case X86_XCPT_TS:
14270 case X86_XCPT_NP:
14271 case X86_XCPT_SS:
14272 case X86_XCPT_PF:
14273 case X86_XCPT_AC:
14274 fFlags |= IEM_XCPT_FLAGS_ERR;
14275 break;
14276
14277 case X86_XCPT_NMI:
14278 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14279 break;
14280 }
14281 break;
14282
14283 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14284 }
14285
14286 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14287}
14288
14289
14290/**
14291 * Injects the active TRPM event.
14292 *
14293 * @returns Strict VBox status code.
14294 * @param pVCpu The cross context virtual CPU structure.
14295 */
14296VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14297{
14298#ifndef IEM_IMPLEMENTS_TASKSWITCH
14299 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14300#else
14301 uint8_t u8TrapNo;
14302 TRPMEVENT enmType;
14303 RTGCUINT uErrCode;
14304 RTGCUINTPTR uCr2;
14305 uint8_t cbInstr;
14306 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14307 if (RT_FAILURE(rc))
14308 return rc;
14309
14310 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14311
14312 /** @todo Are there any other codes that imply the event was successfully
14313 * delivered to the guest? See @bugref{6607}. */
14314 if ( rcStrict == VINF_SUCCESS
14315 || rcStrict == VINF_IEM_RAISED_XCPT)
14316 {
14317 TRPMResetTrap(pVCpu);
14318 }
14319 return rcStrict;
14320#endif
14321}
14322
14323
14324VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14325{
14326 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14327 return VERR_NOT_IMPLEMENTED;
14328}
14329
14330
14331VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14332{
14333 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14334 return VERR_NOT_IMPLEMENTED;
14335}
14336
14337
14338#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14339/**
14340 * Executes an IRET instruction with default operand size.
14341 *
14342 * This is for PATM.
14343 *
14344 * @returns VBox status code.
14345 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14346 * @param pCtxCore The register frame.
14347 */
14348VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14349{
14350 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14351
14352 iemCtxCoreToCtx(pCtx, pCtxCore);
14353 iemInitDecoder(pVCpu);
14354 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14355 if (rcStrict == VINF_SUCCESS)
14356 iemCtxToCtxCore(pCtxCore, pCtx);
14357 else
14358 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14359 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14360 return rcStrict;
14361}
14362#endif
14363
14364
14365/**
14366 * Macro used by the IEMExec* method to check the given instruction length.
14367 *
14368 * Will return on failure!
14369 *
14370 * @param a_cbInstr The given instruction length.
14371 * @param a_cbMin The minimum length.
14372 */
14373#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14374 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14375 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14376
14377
14378/**
14379 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14380 *
14381 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14382 *
14383 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14385 * @param rcStrict The status code to fiddle.
14386 */
14387DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14388{
14389 iemUninitExec(pVCpu);
14390#ifdef IN_RC
14391 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14392 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14393#else
14394 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14395#endif
14396}
14397
14398
14399/**
14400 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14401 *
14402 * This API ASSUMES that the caller has already verified that the guest code is
14403 * allowed to access the I/O port. (The I/O port is in the DX register in the
14404 * guest state.)
14405 *
14406 * @returns Strict VBox status code.
14407 * @param pVCpu The cross context virtual CPU structure.
14408 * @param cbValue The size of the I/O port access (1, 2, or 4).
14409 * @param enmAddrMode The addressing mode.
14410 * @param fRepPrefix Indicates whether a repeat prefix is used
14411 * (doesn't matter which for this instruction).
14412 * @param cbInstr The instruction length in bytes.
14413 * @param iEffSeg The effective segment register (index).
14414 * @param fIoChecked Whether the access to the I/O port has been
14415 * checked or not. It's typically checked in the
14416 * HM scenario.
14417 */
14418VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14419 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14420{
14421 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14422 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14423
14424 /*
14425 * State init.
14426 */
14427 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14428
14429 /*
14430 * Switch orgy for getting to the right handler.
14431 */
14432 VBOXSTRICTRC rcStrict;
14433 if (fRepPrefix)
14434 {
14435 switch (enmAddrMode)
14436 {
14437 case IEMMODE_16BIT:
14438 switch (cbValue)
14439 {
14440 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14441 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14442 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14443 default:
14444 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14445 }
14446 break;
14447
14448 case IEMMODE_32BIT:
14449 switch (cbValue)
14450 {
14451 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14452 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14453 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14454 default:
14455 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14456 }
14457 break;
14458
14459 case IEMMODE_64BIT:
14460 switch (cbValue)
14461 {
14462 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14463 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14464 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14465 default:
14466 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14467 }
14468 break;
14469
14470 default:
14471 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14472 }
14473 }
14474 else
14475 {
14476 switch (enmAddrMode)
14477 {
14478 case IEMMODE_16BIT:
14479 switch (cbValue)
14480 {
14481 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14482 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14483 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14484 default:
14485 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14486 }
14487 break;
14488
14489 case IEMMODE_32BIT:
14490 switch (cbValue)
14491 {
14492 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14493 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14494 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14495 default:
14496 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14497 }
14498 break;
14499
14500 case IEMMODE_64BIT:
14501 switch (cbValue)
14502 {
14503 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14504 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14505 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14506 default:
14507 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14508 }
14509 break;
14510
14511 default:
14512 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14513 }
14514 }
14515
14516 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14517}
14518
14519
14520/**
14521 * Interface for HM and EM for executing string I/O IN (read) instructions.
14522 *
14523 * This API ASSUMES that the caller has already verified that the guest code is
14524 * allowed to access the I/O port. (The I/O port is in the DX register in the
14525 * guest state.)
14526 *
14527 * @returns Strict VBox status code.
14528 * @param pVCpu The cross context virtual CPU structure.
14529 * @param cbValue The size of the I/O port access (1, 2, or 4).
14530 * @param enmAddrMode The addressing mode.
14531 * @param fRepPrefix Indicates whether a repeat prefix is used
14532 * (doesn't matter which for this instruction).
14533 * @param cbInstr The instruction length in bytes.
14534 * @param fIoChecked Whether the access to the I/O port has been
14535 * checked or not. It's typically checked in the
14536 * HM scenario.
14537 */
14538VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14539 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14540{
14541 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14542
14543 /*
14544 * State init.
14545 */
14546 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14547
14548 /*
14549 * Switch orgy for getting to the right handler.
14550 */
14551 VBOXSTRICTRC rcStrict;
14552 if (fRepPrefix)
14553 {
14554 switch (enmAddrMode)
14555 {
14556 case IEMMODE_16BIT:
14557 switch (cbValue)
14558 {
14559 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14560 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14561 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14562 default:
14563 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14564 }
14565 break;
14566
14567 case IEMMODE_32BIT:
14568 switch (cbValue)
14569 {
14570 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14571 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14572 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14573 default:
14574 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14575 }
14576 break;
14577
14578 case IEMMODE_64BIT:
14579 switch (cbValue)
14580 {
14581 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14582 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14583 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14584 default:
14585 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14586 }
14587 break;
14588
14589 default:
14590 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14591 }
14592 }
14593 else
14594 {
14595 switch (enmAddrMode)
14596 {
14597 case IEMMODE_16BIT:
14598 switch (cbValue)
14599 {
14600 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14601 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14602 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14603 default:
14604 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14605 }
14606 break;
14607
14608 case IEMMODE_32BIT:
14609 switch (cbValue)
14610 {
14611 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14612 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14613 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14614 default:
14615 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14616 }
14617 break;
14618
14619 case IEMMODE_64BIT:
14620 switch (cbValue)
14621 {
14622 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14623 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14624 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14625 default:
14626 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14627 }
14628 break;
14629
14630 default:
14631 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14632 }
14633 }
14634
14635 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14636}
14637
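/*
 * Illustrative only: a hedged sketch of forwarding a string I/O exit to the
 * two interfaces above.  The fWrite flag and the cbValue, enmAddrMode, fRep,
 * cbInstr and iEffSeg values are assumptions standing in for whatever the
 * caller has decoded from its exit information.
 */
#if 0
    VBOXSTRICTRC rcStrict;
    if (fWrite)     /* OUTS / REP OUTS */
        rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
                                        iEffSeg, true /*fIoChecked*/);
    else            /* INS / REP INS */
        rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
                                       true /*fIoChecked*/);
#endif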
14638
14639/**
14640 * Interface for raw-mode to execute an OUT instruction.
14641 *
14642 * @returns Strict VBox status code.
14643 * @param pVCpu The cross context virtual CPU structure.
14644 * @param cbInstr The instruction length in bytes.
14645 * @param u16Port The port to write to.
14646 * @param cbReg The register size.
14647 *
14648 * @remarks In ring-0 not all of the state needs to be synced in.
14649 */
14650VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14651{
14652 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14653 Assert(cbReg <= 4 && cbReg != 3);
14654
14655 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14656 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14657 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14658}
14659
14660
14661/**
14662 * Interface for raw-mode to execute an IN instruction.
14663 *
14664 * @returns Strict VBox status code.
14665 * @param pVCpu The cross context virtual CPU structure.
14666 * @param cbInstr The instruction length in bytes.
14667 * @param u16Port The port to read.
14668 * @param cbReg The register size.
14669 */
14670VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14671{
14672 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14673 Assert(cbReg <= 4 && cbReg != 3);
14674
14675 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14676 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14677 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14678}
14679
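/*
 * Illustrative only: a hedged sketch of dispatching a decoded port I/O access
 * to the two interfaces above.  The fWrite flag and the cbInstr, u16Port and
 * cbReg values are assumptions for whatever the caller has decoded.
 */
#if 0
    VBOXSTRICTRC rcStrict = fWrite
                          ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, cbReg)
                          : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, cbReg);
#endif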
14680
14681/**
14682 * Interface for HM and EM to write to a CRx register.
14683 *
14684 * @returns Strict VBox status code.
14685 * @param pVCpu The cross context virtual CPU structure.
14686 * @param cbInstr The instruction length in bytes.
14687 * @param iCrReg The control register number (destination).
14688 * @param iGReg The general purpose register number (source).
14689 *
14690 * @remarks In ring-0 not all of the state needs to be synced in.
14691 */
14692VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14693{
14694 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14695 Assert(iCrReg < 16);
14696 Assert(iGReg < 16);
14697
14698 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14699 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14700 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14701}
14702
14703
14704/**
14705 * Interface for HM and EM to read from a CRx register.
14706 *
14707 * @returns Strict VBox status code.
14708 * @param pVCpu The cross context virtual CPU structure.
14709 * @param cbInstr The instruction length in bytes.
14710 * @param iGReg The general purpose register number (destination).
14711 * @param iCrReg The control register number (source).
14712 *
14713 * @remarks In ring-0 not all of the state needs to be synced in.
14714 */
14715VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14716{
14717 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14718 Assert(iCrReg < 16);
14719 Assert(iGReg < 16);
14720
14721 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14722 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14723 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14724}
14725
14726
14727/**
14728 * Interface for HM and EM to clear the CR0[TS] bit.
14729 *
14730 * @returns Strict VBox status code.
14731 * @param pVCpu The cross context virtual CPU structure.
14732 * @param cbInstr The instruction length in bytes.
14733 *
14734 * @remarks In ring-0 not all of the state needs to be synced in.
14735 */
14736VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14737{
14738 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14739
14740 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14741 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14742 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14743}
14744
14745
14746/**
14747 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14748 *
14749 * @returns Strict VBox status code.
14750 * @param pVCpu The cross context virtual CPU structure.
14751 * @param cbInstr The instruction length in bytes.
14752 * @param uValue The value to load into CR0.
14753 *
14754 * @remarks In ring-0 not all of the state needs to be synced in.
14755 */
14756VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14757{
14758 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14759
14760 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14761 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14762 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14763}
14764
14765
14766/**
14767 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14768 *
14769 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14770 *
14771 * @returns Strict VBox status code.
14772 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14773 * @param cbInstr The instruction length in bytes.
14774 * @remarks In ring-0 not all of the state needs to be synced in.
14775 * @thread EMT(pVCpu)
14776 */
14777VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14778{
14779 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14780
14781 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14782 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14783 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14784}
14785
14786#ifdef IN_RING3
14787
14788/**
14789 * Handles the unlikely and probably fatal merge cases.
14790 *
14791 * @returns Merged status code.
14792 * @param rcStrict Current EM status code.
14793 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14794 * with @a rcStrict.
14795 * @param iMemMap The memory mapping index. For error reporting only.
14796 * @param pVCpu The cross context virtual CPU structure of the calling
14797 * thread, for error reporting only.
14798 */
14799DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14800 unsigned iMemMap, PVMCPU pVCpu)
14801{
14802 if (RT_FAILURE_NP(rcStrict))
14803 return rcStrict;
14804
14805 if (RT_FAILURE_NP(rcStrictCommit))
14806 return rcStrictCommit;
14807
14808 if (rcStrict == rcStrictCommit)
14809 return rcStrictCommit;
14810
14811 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14812 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14813 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14814 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14815 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14816 return VERR_IOM_FF_STATUS_IPE;
14817}
14818
14819
14820/**
14821 * Helper for IOMR3ProcessForceFlag.
14822 *
14823 * @returns Merged status code.
14824 * @param rcStrict Current EM status code.
14825 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14826 * with @a rcStrict.
14827 * @param iMemMap The memory mapping index. For error reporting only.
14828 * @param pVCpu The cross context virtual CPU structure of the calling
14829 * thread, for error reporting only.
14830 */
14831DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14832{
14833 /* Simple. */
14834 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14835 return rcStrictCommit;
14836
14837 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14838 return rcStrict;
14839
14840 /* EM scheduling status codes. */
14841 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14842 && rcStrict <= VINF_EM_LAST))
14843 {
14844 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14845 && rcStrictCommit <= VINF_EM_LAST))
14846 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14847 }
14848
14849 /* Unlikely */
14850 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14851}
14852
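/*
 * Aside (illustrative): in the common cases the merge is trivial - if the
 * incoming status is VINF_SUCCESS (or merely a now-satisfied request to reach
 * ring-3), the commit status stands; if the commit succeeded, the incoming
 * status stands.  Only when both are EM scheduling statuses does priority
 * matter, and then the numerically smaller (higher priority) one wins.
 */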
14853
14854/**
14855 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14856 *
14857 * @returns Merge between @a rcStrict and what the commit operation returned.
14858 * @param pVM The cross context VM structure.
14859 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14860 * @param rcStrict The status code returned by ring-0 or raw-mode.
14861 */
14862VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14863{
14864 /*
14865 * Reset the pending commit.
14866 */
14867 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14868 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14869 ("%#x %#x %#x\n",
14870 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14871 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14872
14873 /*
14874 * Commit the pending bounce buffers (usually just one).
14875 */
14876 unsigned cBufs = 0;
14877 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14878 while (iMemMap-- > 0)
14879 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14880 {
14881 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14882 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14883 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14884
14885 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14886 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14887 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14888
14889 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14890 {
14891 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14892 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14893 pbBuf,
14894 cbFirst,
14895 PGMACCESSORIGIN_IEM);
14896 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14897 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14898 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14899 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14900 }
14901
14902 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14903 {
14904 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14905 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14906 pbBuf + cbFirst,
14907 cbSecond,
14908 PGMACCESSORIGIN_IEM);
14909 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14910 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14911 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14912 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14913 }
14914 cBufs++;
14915 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14916 }
14917
14918 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14919 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14920 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14921 pVCpu->iem.s.cActiveMappings = 0;
14922 return rcStrict;
14923}
14924
14925#endif /* IN_RING3 */
14926