VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@65398

Last change on this file since 65398 was 65368, checked in by vboxsync, 8 years ago

IEM: build fix for code tlb and wp fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 574.9 KB
1/* $Id: IEMAll.cpp 65368 2017-01-19 10:47:59Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
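/*
 * Illustrative sketch only, not part of the original file: a rough idea of how
 * the levels above map onto the VBox logging macros within the IEM log group.
 * The message texts and variables are made up for the example.
 */
#if 0 /* example sketch */
    LogFlow(("IEMExecOne: enter %04x:%RGv\n", pCtx->cs.Sel, pCtx->rip));   /* Flow   : enter/exit state info. */
    Log4(("decode - %04x:%RGv: xor eax, eax\n", pCtx->cs.Sel, pCtx->rip)); /* Level 4: mnemonic w/ EIP. */
    Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));                       /* Level 8: memory writes. */
#endif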
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#ifdef IEM_VERIFICATION_MODE_FULL
115# include <VBox/vmm/rem.h>
116# include <VBox/vmm/mm.h>
117#endif
118#include <VBox/vmm/vm.h>
119#include <VBox/log.h>
120#include <VBox/err.h>
121#include <VBox/param.h>
122#include <VBox/dis.h>
123#include <VBox/disopcode.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
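/*
 * Illustrative sketch only (the function name and body are hypothetical): how a
 * decoder function is declared with the macros above and later invoked via the
 * FNIEMOP_CALL macro defined further down.
 */
#if 0 /* example sketch */
FNIEMOP_DEF(iemOp_ExampleIllustration)
{
    /* A real decoder would fetch more opcode bytes and dispatch from here. */
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;
}
/* ... later, from a dispatch path: VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOp_ExampleIllustration); */
#endif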
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
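/*
 * Illustrative only: the two views of the IEMSELDESC union above.  Long mode
 * system descriptors span 16 bytes, so 64-bit code uses the Long view while
 * legacy/compatibility code sticks to Legacy.  The X86DESC_BASE/X86DESC64_BASE
 * accessors are assumed to come from iprt/x86.h.
 */
#if 0 /* example sketch */
    IEMSELDESC Desc;
    uint32_t uBaseLegacy = X86DESC_BASE(&Desc.Legacy);   /* 32-bit base from the legacy view. */
    uint64_t uBaseLong   = X86DESC64_BASE(&Desc.Long);   /* Full 64-bit base for system descriptors. */
#endif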
214
215
216/*********************************************************************************************************************************
217* Defined Constants And Macros *
218*********************************************************************************************************************************/
219/** @def IEM_WITH_SETJMP
220 * Enables alternative status code handling using setjmps.
221 *
222 * This adds a bit of expense via the setjmp() call since it saves all the
223 * non-volatile registers. However, it eliminates return code checks and allows
224 * for more optimal return value passing (return regs instead of stack buffer).
225 */
226#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
227# define IEM_WITH_SETJMP
228#endif
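/*
 * Conceptual sketch, not actual IEM code (the fetch helpers are named purely for
 * illustration): what the two status handling styles look like to a caller.
 */
#if 0 /* example sketch */
    /* Without IEM_WITH_SETJMP: every fetch returns a status that must be checked. */
    VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Example(pVCpu, &bOpcode);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* With IEM_WITH_SETJMP: a failure longjmps to the setjmp frame established by
       the outer caller, so the value can be returned directly in a register. */
    uint8_t bOpcode2 = iemOpcodeGetNextU8JmpExample(pVCpu);
#endif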
229
230/** Temporary hack to disable the double execution. Will be removed in favor
231 * of a dedicated execution mode in EM. */
232//#define IEM_VERIFICATION_MODE_NO_REM
233
234/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
235 * due to GCC lacking knowledge about the value range of a switch. */
236#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
237
238/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
240
241/**
242 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
243 * occasion.
244 */
245#ifdef LOG_ENABLED
246# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
247 do { \
248 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
249 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
250 } while (0)
251#else
252# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
254#endif
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion using the supplied logger statement.
259 *
260 * @param a_LoggerArgs What to log on failure.
261 */
262#ifdef LOG_ENABLED
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 do { \
265 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
266 /*LogFunc(a_LoggerArgs);*/ \
267 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
268 } while (0)
269#else
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
272#endif
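/*
 * Illustrative use only (the condition and message are made up): how a decoder
 * path bails out on an unimplemented aspect using the macros above.
 */
#if 0 /* example sketch */
    if (fSomeUnimplementedCornerCase) /* hypothetical condition */
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: operand combination not implemented\n"));
#endif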
273
274/**
275 * Call an opcode decoder function.
276 *
277 * We're using macros for this so that adding and removing parameters can be
278 * done as we please. See FNIEMOP_DEF.
279 */
280#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
281
282/**
283 * Call a common opcode decoder function taking one extra argument.
284 *
285 * We're using macros for this so that adding and removing parameters can be
286 * done as we please. See FNIEMOP_DEF_1.
287 */
288#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
289
290/**
291 * Call a common opcode decoder function taking two extra arguments.
292 *
293 * We're using macros for this so that adding and removing parameters can be
294 * done as we please. See FNIEMOP_DEF_2.
295 */
296#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
297
298/**
299 * Check if we're currently executing in real or virtual 8086 mode.
300 *
301 * @returns @c true if it is, @c false if not.
302 * @param a_pVCpu The IEM state of the current CPU.
303 */
304#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
305
306/**
307 * Check if we're currently executing in virtual 8086 mode.
308 *
309 * @returns @c true if it is, @c false if not.
310 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
311 */
312#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
313
314/**
315 * Check if we're currently executing in long mode.
316 *
317 * @returns @c true if it is, @c false if not.
318 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
319 */
320#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
321
322/**
323 * Check if we're currently executing in real mode.
324 *
325 * @returns @c true if it is, @c false if not.
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
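/*
 * Illustrative use only: a typical guard built from the mode-check macros above,
 * raising \#GP(0) for an instruction that is invalid outside protected mode.
 * (iemRaiseGeneralProtectionFault0 is forward declared further down in this file.)
 */
#if 0 /* example sketch */
    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
        return iemRaiseGeneralProtectionFault0(pVCpu);
#endif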
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
336
337/**
338 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
339 * @returns PCCPUMFEATURES
340 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
341 */
342#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
343
344/**
345 * Evaluates to true if we're presenting an Intel CPU to the guest.
346 */
347#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
348
349/**
350 * Evaluates to true if we're presenting an AMD CPU to the guest.
351 */
352#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
353
354/**
355 * Check if the address is canonical.
356 */
357#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
358
359/** @def IEM_USE_UNALIGNED_DATA_ACCESS
360 * Use unaligned accesses instead of elaborate byte assembly. */
361#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
362# define IEM_USE_UNALIGNED_DATA_ACCESS
363#endif
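/*
 * Conceptual sketch only (pbDst and u32Value are made up): what the define above
 * switches between when storing a 32-bit value into a byte buffer.
 */
#if 0 /* example sketch */
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint32_t *)pbDst = u32Value;          /* x86/AMD64 handle unaligned stores fine. */
# else
    pbDst[0] = RT_BYTE1(u32Value);          /* Elaborate byte assembly for stricter hosts. */
    pbDst[1] = RT_BYTE2(u32Value);
    pbDst[2] = RT_BYTE3(u32Value);
    pbDst[3] = RT_BYTE4(u32Value);
# endif
#endif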
364
365
366/*********************************************************************************************************************************
367* Global Variables *
368*********************************************************************************************************************************/
369extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
370
371
372/** Function table for the ADD instruction. */
373IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
374{
375 iemAImpl_add_u8, iemAImpl_add_u8_locked,
376 iemAImpl_add_u16, iemAImpl_add_u16_locked,
377 iemAImpl_add_u32, iemAImpl_add_u32_locked,
378 iemAImpl_add_u64, iemAImpl_add_u64_locked
379};
380
381/** Function table for the ADC instruction. */
382IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
383{
384 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
385 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
386 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
387 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
388};
389
390/** Function table for the SUB instruction. */
391IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
392{
393 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
394 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
395 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
396 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
397};
398
399/** Function table for the SBB instruction. */
400IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
401{
402 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
403 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
404 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
405 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
406};
407
408/** Function table for the OR instruction. */
409IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
410{
411 iemAImpl_or_u8, iemAImpl_or_u8_locked,
412 iemAImpl_or_u16, iemAImpl_or_u16_locked,
413 iemAImpl_or_u32, iemAImpl_or_u32_locked,
414 iemAImpl_or_u64, iemAImpl_or_u64_locked
415};
416
417/** Function table for the XOR instruction. */
418IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
419{
420 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
421 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
422 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
423 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
424};
425
426/** Function table for the AND instruction. */
427IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
428{
429 iemAImpl_and_u8, iemAImpl_and_u8_locked,
430 iemAImpl_and_u16, iemAImpl_and_u16_locked,
431 iemAImpl_and_u32, iemAImpl_and_u32_locked,
432 iemAImpl_and_u64, iemAImpl_and_u64_locked
433};
434
435/** Function table for the CMP instruction.
436 * @remarks Making operand order ASSUMPTIONS.
437 */
438IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
439{
440 iemAImpl_cmp_u8, NULL,
441 iemAImpl_cmp_u16, NULL,
442 iemAImpl_cmp_u32, NULL,
443 iemAImpl_cmp_u64, NULL
444};
445
446/** Function table for the TEST instruction.
447 * @remarks Making operand order ASSUMPTIONS.
448 */
449IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
450{
451 iemAImpl_test_u8, NULL,
452 iemAImpl_test_u16, NULL,
453 iemAImpl_test_u32, NULL,
454 iemAImpl_test_u64, NULL
455};
456
457/** Function table for the BT instruction. */
458IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
459{
460 NULL, NULL,
461 iemAImpl_bt_u16, NULL,
462 iemAImpl_bt_u32, NULL,
463 iemAImpl_bt_u64, NULL
464};
465
466/** Function table for the BTC instruction. */
467IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
468{
469 NULL, NULL,
470 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
471 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
472 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
473};
474
475/** Function table for the BTR instruction. */
476IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
477{
478 NULL, NULL,
479 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
480 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
481 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
482};
483
484/** Function table for the BTS instruction. */
485IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
486{
487 NULL, NULL,
488 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
489 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
490 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
491};
492
493/** Function table for the BSF instruction. */
494IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
495{
496 NULL, NULL,
497 iemAImpl_bsf_u16, NULL,
498 iemAImpl_bsf_u32, NULL,
499 iemAImpl_bsf_u64, NULL
500};
501
502/** Function table for the BSR instruction. */
503IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
504{
505 NULL, NULL,
506 iemAImpl_bsr_u16, NULL,
507 iemAImpl_bsr_u32, NULL,
508 iemAImpl_bsr_u64, NULL
509};
510
511/** Function table for the IMUL instruction. */
512IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
513{
514 NULL, NULL,
515 iemAImpl_imul_two_u16, NULL,
516 iemAImpl_imul_two_u32, NULL,
517 iemAImpl_imul_two_u64, NULL
518};
519
520/** Group 1 /r lookup table. */
521IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
522{
523 &g_iemAImpl_add,
524 &g_iemAImpl_or,
525 &g_iemAImpl_adc,
526 &g_iemAImpl_sbb,
527 &g_iemAImpl_and,
528 &g_iemAImpl_sub,
529 &g_iemAImpl_xor,
530 &g_iemAImpl_cmp
531};
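/*
 * Illustrative only (bRm stands for a ModR/M byte, not a real variable here):
 * how the group-1 table above is indexed by the ModR/M reg field to pick the
 * implementation for the 0x80..0x83 opcode group.
 */
#if 0 /* example sketch */
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7]; /* reg=0 -> ADD, ..., reg=7 -> CMP */
#endif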
532
533/** Function table for the INC instruction. */
534IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
535{
536 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
537 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
538 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
539 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
540};
541
542/** Function table for the DEC instruction. */
543IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
544{
545 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
546 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
547 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
548 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
549};
550
551/** Function table for the NEG instruction. */
552IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
553{
554 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
555 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
556 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
557 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
558};
559
560/** Function table for the NOT instruction. */
561IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
562{
563 iemAImpl_not_u8, iemAImpl_not_u8_locked,
564 iemAImpl_not_u16, iemAImpl_not_u16_locked,
565 iemAImpl_not_u32, iemAImpl_not_u32_locked,
566 iemAImpl_not_u64, iemAImpl_not_u64_locked
567};
568
569
570/** Function table for the ROL instruction. */
571IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
572{
573 iemAImpl_rol_u8,
574 iemAImpl_rol_u16,
575 iemAImpl_rol_u32,
576 iemAImpl_rol_u64
577};
578
579/** Function table for the ROR instruction. */
580IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
581{
582 iemAImpl_ror_u8,
583 iemAImpl_ror_u16,
584 iemAImpl_ror_u32,
585 iemAImpl_ror_u64
586};
587
588/** Function table for the RCL instruction. */
589IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
590{
591 iemAImpl_rcl_u8,
592 iemAImpl_rcl_u16,
593 iemAImpl_rcl_u32,
594 iemAImpl_rcl_u64
595};
596
597/** Function table for the RCR instruction. */
598IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
599{
600 iemAImpl_rcr_u8,
601 iemAImpl_rcr_u16,
602 iemAImpl_rcr_u32,
603 iemAImpl_rcr_u64
604};
605
606/** Function table for the SHL instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
608{
609 iemAImpl_shl_u8,
610 iemAImpl_shl_u16,
611 iemAImpl_shl_u32,
612 iemAImpl_shl_u64
613};
614
615/** Function table for the SHR instruction. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
617{
618 iemAImpl_shr_u8,
619 iemAImpl_shr_u16,
620 iemAImpl_shr_u32,
621 iemAImpl_shr_u64
622};
623
624/** Function table for the SAR instruction. */
625IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
626{
627 iemAImpl_sar_u8,
628 iemAImpl_sar_u16,
629 iemAImpl_sar_u32,
630 iemAImpl_sar_u64
631};
632
633
634/** Function table for the MUL instruction. */
635IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
636{
637 iemAImpl_mul_u8,
638 iemAImpl_mul_u16,
639 iemAImpl_mul_u32,
640 iemAImpl_mul_u64
641};
642
643/** Function table for the IMUL instruction working implicitly on rAX. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
645{
646 iemAImpl_imul_u8,
647 iemAImpl_imul_u16,
648 iemAImpl_imul_u32,
649 iemAImpl_imul_u64
650};
651
652/** Function table for the DIV instruction. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
654{
655 iemAImpl_div_u8,
656 iemAImpl_div_u16,
657 iemAImpl_div_u32,
658 iemAImpl_div_u64
659};
660
661/** Function table for the IDIV instruction. */
662IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
663{
664 iemAImpl_idiv_u8,
665 iemAImpl_idiv_u16,
666 iemAImpl_idiv_u32,
667 iemAImpl_idiv_u64
668};
669
670/** Function table for the SHLD instruction */
671IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
672{
673 iemAImpl_shld_u16,
674 iemAImpl_shld_u32,
675 iemAImpl_shld_u64,
676};
677
678/** Function table for the SHRD instruction */
679IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
680{
681 iemAImpl_shrd_u16,
682 iemAImpl_shrd_u32,
683 iemAImpl_shrd_u64,
684};
685
686
687/** Function table for the PUNPCKLBW instruction */
688IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
689/** Function table for the PUNPCKLWD instruction */
690IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
691/** Function table for the PUNPCKLDQ instruction */
692IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
693/** Function table for the PUNPCKLQDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
695
696/** Function table for the PUNPCKHBW instruction */
697IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
698/** Function table for the PUNPCKHWD instruction */
699IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
700/** Function table for the PUNPCKHDQ instruction */
701IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
702/** Function table for the PUNPCKHQDQ instruction */
703IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
704
705/** Function table for the PXOR instruction */
706IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
707/** Function table for the PCMPEQB instruction */
708IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
709/** Function table for the PCMPEQW instruction */
710IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
711/** Function table for the PCMPEQD instruction */
712IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
713
714
715#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
716/** What IEM just wrote. */
717uint8_t g_abIemWrote[256];
718/** How much IEM just wrote. */
719size_t g_cbIemWrote;
720#endif
721
722
723/*********************************************************************************************************************************
724* Internal Functions *
725*********************************************************************************************************************************/
726IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
727IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
728IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
729IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
730/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
733IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
734IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
735IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
736IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
737IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
738IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
739IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
740IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
741IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
742IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
743#ifdef IEM_WITH_SETJMP
744DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
745DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
746DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
748DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
749#endif
750
751IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
752IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
754IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
755IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
756IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
757IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
758IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
759IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
760IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
761IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
762IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
763IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
764IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
765IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
766IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
767
768#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
769IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
770#endif
771IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
772IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
773
774
775
776/**
777 * Sets the pass up status.
778 *
779 * @returns VINF_SUCCESS.
780 * @param pVCpu The cross context virtual CPU structure of the
781 * calling thread.
782 * @param rcPassUp The pass up status. Must be informational.
783 * VINF_SUCCESS is not allowed.
784 */
785IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
786{
787 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
788
789 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
790 if (rcOldPassUp == VINF_SUCCESS)
791 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
792 /* If both are EM scheduling codes, use EM priority rules. */
793 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
794 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
795 {
796 if (rcPassUp < rcOldPassUp)
797 {
798 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
799 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
800 }
801 else
802 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
803 }
804 /* Override EM scheduling with specific status code. */
805 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
806 {
807 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
808 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
809 }
810 /* Don't override specific status code, first come first served. */
811 else
812 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
813 return VINF_SUCCESS;
814}
815
816
817/**
818 * Calculates the CPU mode.
819 *
820 * This is mainly for updating IEMCPU::enmCpuMode.
821 *
822 * @returns CPU mode.
823 * @param pCtx The register context for the CPU.
824 */
825DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
826{
827 if (CPUMIsGuestIn64BitCodeEx(pCtx))
828 return IEMMODE_64BIT;
829 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
830 return IEMMODE_32BIT;
831 return IEMMODE_16BIT;
832}
833
834
835/**
836 * Initializes the execution state.
837 *
838 * @param pVCpu The cross context virtual CPU structure of the
839 * calling thread.
840 * @param fBypassHandlers Whether to bypass access handlers.
841 *
842 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
843 * side-effects in strict builds.
844 */
845DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
846{
847 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
848
849 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
850
851#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
853 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
854 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
855 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
856 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
858 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
859 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
860#endif
861
862#ifdef VBOX_WITH_RAW_MODE_NOT_R0
863 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
864#endif
865 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
866 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
867#ifdef VBOX_STRICT
868 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
869 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
870 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
871 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
872 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
873 pVCpu->iem.s.uRexReg = 127;
874 pVCpu->iem.s.uRexB = 127;
875 pVCpu->iem.s.uRexIndex = 127;
876 pVCpu->iem.s.iEffSeg = 127;
877 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
878# ifdef IEM_WITH_CODE_TLB
879 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
880 pVCpu->iem.s.pbInstrBuf = NULL;
881 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
882 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
883 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
884 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
885# else
886 pVCpu->iem.s.offOpcode = 127;
887 pVCpu->iem.s.cbOpcode = 127;
888# endif
889#endif
890
891 pVCpu->iem.s.cActiveMappings = 0;
892 pVCpu->iem.s.iNextMapping = 0;
893 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
894 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
895#ifdef VBOX_WITH_RAW_MODE_NOT_R0
896 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
897 && pCtx->cs.u64Base == 0
898 && pCtx->cs.u32Limit == UINT32_MAX
899 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
900 if (!pVCpu->iem.s.fInPatchCode)
901 CPUMRawLeave(pVCpu, VINF_SUCCESS);
902#endif
903
904#ifdef IEM_VERIFICATION_MODE_FULL
905 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
906 pVCpu->iem.s.fNoRem = true;
907#endif
908}
909
910
911/**
912 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
913 *
914 * @param pVCpu The cross context virtual CPU structure of the
915 * calling thread.
916 */
917DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
918{
919 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
920#ifdef IEM_VERIFICATION_MODE_FULL
921 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
922#endif
923#ifdef VBOX_STRICT
924# ifdef IEM_WITH_CODE_TLB
925 NOREF(pVCpu);
926# else
927 pVCpu->iem.s.cbOpcode = 0;
928# endif
929#else
930 NOREF(pVCpu);
931#endif
932}
933
934
935/**
936 * Initializes the decoder state.
937 *
938 * iemReInitDecoder is mostly a copy of this function.
939 *
940 * @param pVCpu The cross context virtual CPU structure of the
941 * calling thread.
942 * @param fBypassHandlers Whether to bypass access handlers.
943 */
944DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
945{
946 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
947
948 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
949
950#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
951 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
952 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
953 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
954 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
955 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
956 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
957 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
959#endif
960
961#ifdef VBOX_WITH_RAW_MODE_NOT_R0
962 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
963#endif
964 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
965#ifdef IEM_VERIFICATION_MODE_FULL
966 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
967 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
968#endif
969 IEMMODE enmMode = iemCalcCpuMode(pCtx);
970 pVCpu->iem.s.enmCpuMode = enmMode;
971 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
972 pVCpu->iem.s.enmEffAddrMode = enmMode;
973 if (enmMode != IEMMODE_64BIT)
974 {
975 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
976 pVCpu->iem.s.enmEffOpSize = enmMode;
977 }
978 else
979 {
980 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
981 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
982 }
983 pVCpu->iem.s.fPrefixes = 0;
984 pVCpu->iem.s.uRexReg = 0;
985 pVCpu->iem.s.uRexB = 0;
986 pVCpu->iem.s.uRexIndex = 0;
987 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
988#ifdef IEM_WITH_CODE_TLB
989 pVCpu->iem.s.pbInstrBuf = NULL;
990 pVCpu->iem.s.offInstrNextByte = 0;
991 pVCpu->iem.s.offCurInstrStart = 0;
992# ifdef VBOX_STRICT
993 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
994 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
995 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
996# endif
997#else
998 pVCpu->iem.s.offOpcode = 0;
999 pVCpu->iem.s.cbOpcode = 0;
1000#endif
1001 pVCpu->iem.s.cActiveMappings = 0;
1002 pVCpu->iem.s.iNextMapping = 0;
1003 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1004 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1005#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1006 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1007 && pCtx->cs.u64Base == 0
1008 && pCtx->cs.u32Limit == UINT32_MAX
1009 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1010 if (!pVCpu->iem.s.fInPatchCode)
1011 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1012#endif
1013
1014#ifdef DBGFTRACE_ENABLED
1015 switch (enmMode)
1016 {
1017 case IEMMODE_64BIT:
1018 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1019 break;
1020 case IEMMODE_32BIT:
1021 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1022 break;
1023 case IEMMODE_16BIT:
1024 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1025 break;
1026 }
1027#endif
1028}
1029
1030
1031/**
1032 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1033 *
1034 * This is mostly a copy of iemInitDecoder.
1035 *
1036 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1037 */
1038DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1039{
1040 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1041
1042 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1043
1044#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1046 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1049 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1050 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1051 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1052 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1053#endif
1054
1055 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1056#ifdef IEM_VERIFICATION_MODE_FULL
1057 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1058 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1059#endif
1060 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1061 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1062 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1063 pVCpu->iem.s.enmEffAddrMode = enmMode;
1064 if (enmMode != IEMMODE_64BIT)
1065 {
1066 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1067 pVCpu->iem.s.enmEffOpSize = enmMode;
1068 }
1069 else
1070 {
1071 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1072 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1073 }
1074 pVCpu->iem.s.fPrefixes = 0;
1075 pVCpu->iem.s.uRexReg = 0;
1076 pVCpu->iem.s.uRexB = 0;
1077 pVCpu->iem.s.uRexIndex = 0;
1078 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1079#ifdef IEM_WITH_CODE_TLB
1080 if (pVCpu->iem.s.pbInstrBuf)
1081 {
1082 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1083 - pVCpu->iem.s.uInstrBufPc;
1084 if (off < pVCpu->iem.s.cbInstrBufTotal)
1085 {
1086 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1087 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1088 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1089 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1090 else
1091 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1092 }
1093 else
1094 {
1095 pVCpu->iem.s.pbInstrBuf = NULL;
1096 pVCpu->iem.s.offInstrNextByte = 0;
1097 pVCpu->iem.s.offCurInstrStart = 0;
1098 pVCpu->iem.s.cbInstrBuf = 0;
1099 pVCpu->iem.s.cbInstrBufTotal = 0;
1100 }
1101 }
1102 else
1103 {
1104 pVCpu->iem.s.offInstrNextByte = 0;
1105 pVCpu->iem.s.offCurInstrStart = 0;
1106 pVCpu->iem.s.cbInstrBuf = 0;
1107 pVCpu->iem.s.cbInstrBufTotal = 0;
1108 }
1109#else
1110 pVCpu->iem.s.cbOpcode = 0;
1111 pVCpu->iem.s.offOpcode = 0;
1112#endif
1113 Assert(pVCpu->iem.s.cActiveMappings == 0);
1114 pVCpu->iem.s.iNextMapping = 0;
1115 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1116 Assert(pVCpu->iem.s.fBypassHandlers == false);
1117#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1118 if (!pVCpu->iem.s.fInPatchCode)
1119 { /* likely */ }
1120 else
1121 {
1122 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1123 && pCtx->cs.u64Base == 0
1124 && pCtx->cs.u32Limit == UINT32_MAX
1125 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1126 if (!pVCpu->iem.s.fInPatchCode)
1127 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1128 }
1129#endif
1130
1131#ifdef DBGFTRACE_ENABLED
1132 switch (enmMode)
1133 {
1134 case IEMMODE_64BIT:
1135 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1136 break;
1137 case IEMMODE_32BIT:
1138 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1139 break;
1140 case IEMMODE_16BIT:
1141 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1142 break;
1143 }
1144#endif
1145}
1146
1147
1148
1149/**
1150 * Prefetches opcodes the first time, i.e. when starting execution.
1151 *
1152 * @returns Strict VBox status code.
1153 * @param pVCpu The cross context virtual CPU structure of the
1154 * calling thread.
1155 * @param fBypassHandlers Whether to bypass access handlers.
1156 */
1157IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1158{
1159#ifdef IEM_VERIFICATION_MODE_FULL
1160 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1161#endif
1162 iemInitDecoder(pVCpu, fBypassHandlers);
1163
1164#ifdef IEM_WITH_CODE_TLB
1165 /** @todo Do ITLB lookup here. */
1166
1167#else /* !IEM_WITH_CODE_TLB */
1168
1169 /*
1170 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1171 *
1172 * First translate CS:rIP to a physical address.
1173 */
1174 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1175 uint32_t cbToTryRead;
1176 RTGCPTR GCPtrPC;
1177 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1178 {
1179 cbToTryRead = PAGE_SIZE;
1180 GCPtrPC = pCtx->rip;
1181 if (!IEM_IS_CANONICAL(GCPtrPC))
1182 return iemRaiseGeneralProtectionFault0(pVCpu);
1183 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1184 }
1185 else
1186 {
1187 uint32_t GCPtrPC32 = pCtx->eip;
1188 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1189 if (GCPtrPC32 > pCtx->cs.u32Limit)
1190 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1191 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1192 if (!cbToTryRead) /* overflowed */
1193 {
1194 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1195 cbToTryRead = UINT32_MAX;
1196 }
1197 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1198 Assert(GCPtrPC <= UINT32_MAX);
1199 }
1200
1201# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1202 /* Allow interpretation of patch manager code blocks since they can for
1203 instance throw #PFs for perfectly good reasons. */
1204 if (pVCpu->iem.s.fInPatchCode)
1205 {
1206 size_t cbRead = 0;
1207 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1208 AssertRCReturn(rc, rc);
1209 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1210 return VINF_SUCCESS;
1211 }
1212# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1213
1214 RTGCPHYS GCPhys;
1215 uint64_t fFlags;
1216 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1217 if (RT_FAILURE(rc))
1218 {
1219 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1220 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1221 }
1222 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1223 {
1224 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1225 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1226 }
1227 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1228 {
1229 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1230 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1231 }
1232 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1233 /** @todo Check reserved bits and such stuff. PGM is better at doing
1234 * that, so do it when implementing the guest virtual address
1235 * TLB... */
1236
1237# ifdef IEM_VERIFICATION_MODE_FULL
1238 /*
1239 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1240 * instruction.
1241 */
1242 /** @todo optimize this differently by not using PGMPhysRead. */
1243 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1244 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1245 if ( offPrevOpcodes < cbOldOpcodes
1246 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1247 {
1248 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1249 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1250 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1251 pVCpu->iem.s.cbOpcode = cbNew;
1252 return VINF_SUCCESS;
1253 }
1254# endif
1255
1256 /*
1257 * Read the bytes at this address.
1258 */
1259 PVM pVM = pVCpu->CTX_SUFF(pVM);
1260# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1261 size_t cbActual;
1262 if ( PATMIsEnabled(pVM)
1263 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1264 {
1265 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1266 Assert(cbActual > 0);
1267 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1268 }
1269 else
1270# endif
1271 {
1272 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1273 if (cbToTryRead > cbLeftOnPage)
1274 cbToTryRead = cbLeftOnPage;
1275 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1276 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1277
1278 if (!pVCpu->iem.s.fBypassHandlers)
1279 {
1280 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1281 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1282 { /* likely */ }
1283 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1284 {
1285 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1286 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1287 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1288 }
1289 else
1290 {
1291 Log((RT_SUCCESS(rcStrict)
1292 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1293 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1294 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1295 return rcStrict;
1296 }
1297 }
1298 else
1299 {
1300 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1301 if (RT_SUCCESS(rc))
1302 { /* likely */ }
1303 else
1304 {
1305 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1306 GCPtrPC, GCPhys, rc, cbToTryRead));
1307 return rc;
1308 }
1309 }
1310 pVCpu->iem.s.cbOpcode = cbToTryRead;
1311 }
1312#endif /* !IEM_WITH_CODE_TLB */
1313 return VINF_SUCCESS;
1314}
1315
1316
1317/**
1318 * Invalidates the IEM TLBs.
1319 *
1320 * This is called internally as well as by PGM when moving GC mappings.
1321 *
1322 *
1323 * @param pVCpu The cross context virtual CPU structure of the calling
1324 * thread.
1325 * @param fVmm Set when PGM calls us with a remapping.
1326 */
1327VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1328{
1329#ifdef IEM_WITH_CODE_TLB
1330 pVCpu->iem.s.cbInstrBufTotal = 0;
1331 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1332 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1333 { /* very likely */ }
1334 else
1335 {
1336 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1337 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1338 while (i-- > 0)
1339 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1340 }
1341#endif
1342
1343#ifdef IEM_WITH_DATA_TLB
1344 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1345 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1346 { /* very likely */ }
1347 else
1348 {
1349 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1350 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1351 while (i-- > 0)
1352 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1353 }
1354#endif
1355 NOREF(pVCpu); NOREF(fVmm);
1356}
1357
1358
1359/**
1360 * Invalidates a page in the TLBs.
1361 *
1362 * @param pVCpu The cross context virtual CPU structure of the calling
1363 * thread.
1364 * @param GCPtr The address of the page to invalidate
1365 */
1366VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1367{
1368#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1369 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1370 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1371 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1372 uintptr_t idx = (uint8_t)GCPtr;
1373
1374# ifdef IEM_WITH_CODE_TLB
1375 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1376 {
1377 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1378 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1379 pVCpu->iem.s.cbInstrBufTotal = 0;
1380 }
1381# endif
1382
1383# ifdef IEM_WITH_DATA_TLB
1384 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1385 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1386# endif
1387#else
1388 NOREF(pVCpu); NOREF(GCPtr);
1389#endif
1390}
1391
1392
1393/**
1394 * Invalidates the host physical aspects of the IEM TLBs.
1395 *
1396 * This is called internally as well as by PGM when moving GC mappings.
1397 *
1398 * @param pVCpu The cross context virtual CPU structure of the calling
1399 * thread.
1400 */
1401VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1402{
1403#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1404 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1405
1406# ifdef IEM_WITH_CODE_TLB
1407 pVCpu->iem.s.cbInstrBufTotal = 0;
1408# endif
1409 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1410 if (uTlbPhysRev != 0)
1411 {
1412 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1413 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1414 }
1415 else
1416 {
1417 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1418 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1419
1420 unsigned i;
1421# ifdef IEM_WITH_CODE_TLB
1422 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1423 while (i-- > 0)
1424 {
1425 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1426 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1427 }
1428# endif
1429# ifdef IEM_WITH_DATA_TLB
1430 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1431 while (i-- > 0)
1432 {
1433 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1434 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1435 }
1436# endif
1437 }
1438#else
1439 NOREF(pVCpu);
1440#endif
1441}
1442
1443
1444/**
1445 * Invalidates the host physical aspects of the IEM TLBs.
1446 *
1447 * This is called internally as well as by PGM when moving GC mappings.
1448 *
1449 * @param pVM The cross context VM structure.
1450 *
1451 * @remarks Caller holds the PGM lock.
1452 */
1453VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1454{
1455 RT_NOREF_PV(pVM);
1456}
1457
1458#ifdef IEM_WITH_CODE_TLB
1459
1460/**
1461 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1462 * failure and jumps.
1463 *
1464 * We end up here for a number of reasons:
1465 * - pbInstrBuf isn't yet initialized.
1466 * - Advancing beyond the buffer boundary (e.g. cross page).
1467 * - Advancing beyond the CS segment limit.
1468 * - Fetching from non-mappable page (e.g. MMIO).
1469 *
1470 * @param pVCpu The cross context virtual CPU structure of the
1471 * calling thread.
1472 * @param pvDst Where to return the bytes.
1473 * @param cbDst Number of bytes to read.
1474 *
1475 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1476 */
1477IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1478{
1479#ifdef IN_RING3
1480//__debugbreak();
1481 for (;;)
1482 {
1483 Assert(cbDst <= 8);
1484 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1485
1486 /*
1487 * We might have a partial buffer match; deal with that first to make the
1488 * rest simpler. This is the first part of the cross page/buffer case.
1489 */
1490 if (pVCpu->iem.s.pbInstrBuf != NULL)
1491 {
1492 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1493 {
1494 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1495 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1496 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1497
1498 cbDst -= cbCopy;
1499 pvDst = (uint8_t *)pvDst + cbCopy;
1500 offBuf += cbCopy;
1501 pVCpu->iem.s.offInstrNextByte += offBuf;
1502 }
1503 }
1504
1505 /*
1506 * Check segment limit, figuring how much we're allowed to access at this point.
1507 *
1508 * We will fault immediately if RIP is past the segment limit / in non-canonical
1509 * territory. If we do continue, there are one or more bytes to read before we
1510 * end up in trouble and we need to do that first before faulting.
1511 */
1512 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1513 RTGCPTR GCPtrFirst;
1514 uint32_t cbMaxRead;
1515 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1516 {
1517 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1518 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1519 { /* likely */ }
1520 else
1521 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1522 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1523 }
1524 else
1525 {
1526 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1527 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1528 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1529 { /* likely */ }
1530 else
1531 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1532 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1533 if (cbMaxRead != 0)
1534 { /* likely */ }
1535 else
1536 {
1537 /* Overflowed because address is 0 and limit is max. */
1538 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1539 cbMaxRead = X86_PAGE_SIZE;
1540 }
1541 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1542 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1543 if (cbMaxRead2 < cbMaxRead)
1544 cbMaxRead = cbMaxRead2;
1545 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1546 }
1547
1548 /*
1549 * Get the TLB entry for this piece of code.
1550 */
1551 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1552 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1553 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
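/*
 * Illustrative example (made-up address): for GCPtrFirst = 0x00401a37 the
 * guest page number is 0x401, so uTag is 0x401 OR'ed with the current
 * uTlbRevision (which, going by the byte-sized indexing here, stays out of
 * the low bits), and the entry consulted is aEntries[0x01] because only the
 * low 8 bits of the tag pick one of the 256 slots.
 */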
1554 if (pTlbe->uTag == uTag)
1555 {
1556 /* likely when executing lots of code, otherwise unlikely */
1557# ifdef VBOX_WITH_STATISTICS
1558 pVCpu->iem.s.CodeTlb.cTlbHits++;
1559# endif
1560 }
1561 else
1562 {
1563 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1564# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1565 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1566 {
1567 pTlbe->uTag = uTag;
1568 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1569 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1570 pTlbe->GCPhys = NIL_RTGCPHYS;
1571 pTlbe->pbMappingR3 = NULL;
1572 }
1573 else
1574# endif
1575 {
1576 RTGCPHYS GCPhys;
1577 uint64_t fFlags;
1578 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1579 if (RT_FAILURE(rc))
1580 {
1581 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1582 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1583 }
1584
1585 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1586 pTlbe->uTag = uTag;
1587 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1588 pTlbe->GCPhys = GCPhys;
1589 pTlbe->pbMappingR3 = NULL;
1590 }
1591 }
1592
1593 /*
1594 * Check TLB page table level access flags.
1595 */
1596 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1597 {
1598 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1599 {
1600 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1601 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1602 }
1603 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1604 {
1605 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1606 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1607 }
1608 }
1609
1610# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1611 /*
1612 * Allow interpretation of patch manager code blocks since they can for
1613 * instance throw #PFs for perfectly good reasons.
1614 */
1615 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1616 { /* likely */ }
1617 else
1618 {
1619 /** @todo Could optimize this a little in ring-3 if we liked. */
1620 size_t cbRead = 0;
1621 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1622 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1623 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1624 return;
1625 }
1626# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1627
1628 /*
1629 * Look up the physical page info if necessary.
1630 */
1631 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1632 { /* not necessary */ }
1633 else
1634 {
1635 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1636 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1637 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1638 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1639 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1640 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1641 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1642 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1643 }
1644
1645# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1646 /*
1647 * Try to do a direct read using the pbMappingR3 pointer.
1648 */
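/*
 * Note: the single compare below folds three tests into one.  An entry that
 * is usable for a direct read has the current physical revision in its
 * IEMTLBE_F_PHYS_REV bits and zero in IEMTLBE_F_NO_MAPPINGR3 and
 * IEMTLBE_F_PG_NO_READ, so masking with all three and comparing against
 * uTlbPhysRev only succeeds when the revision matches and both of those
 * flags are clear.
 */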
1649 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1650 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1651 {
1652 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1653 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1654 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1655 {
1656 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1657 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1658 }
1659 else
1660 {
1661 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1662 Assert(cbInstr < cbMaxRead);
1663 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1664 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1665 }
1666 if (cbDst <= cbMaxRead)
1667 {
1668 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1669 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1670 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1671 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1672 return;
1673 }
1674 pVCpu->iem.s.pbInstrBuf = NULL;
1675
1676 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1677 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1678 }
1679 else
1680# endif
1681#if 0
1682 /*
1683 * If there is no special read handling, we can read a bit more and
1684 * put it in the prefetch buffer.
1685 */
1686 if ( cbDst < cbMaxRead
1687 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1688 {
1689 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1690 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1691 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1692 { /* likely */ }
1693 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1694 {
1695 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1696 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1697 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1698 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1699 }
1700 else
1701 {
1702 Log((RT_SUCCESS(rcStrict)
1703 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1704 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1705 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1706 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1707 }
1708 }
1709 /*
1710 * Special read handling, so only read exactly what's needed.
1711 * This is a highly unlikely scenario.
1712 */
1713 else
1714#endif
1715 {
1716 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1717 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1718 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1719 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1720 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1721 { /* likely */ }
1722 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1723 {
1724 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1725 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1726 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1727 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1728 }
1729 else
1730 {
1731 Log((RT_SUCCESS(rcStrict)
1732 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1733 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1734 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1735 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1736 }
1737 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1738 if (cbToRead == cbDst)
1739 return;
1740 }
1741
1742 /*
1743 * More to read, loop.
1744 */
1745 cbDst -= cbMaxRead;
1746 pvDst = (uint8_t *)pvDst + cbMaxRead;
1747 }
1748#else
1749 RT_NOREF(pvDst, cbDst);
1750 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1751#endif
1752}
1753
1754#else
1755
1756/**
1757 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1758 * exception if it fails.
1759 *
1760 * @returns Strict VBox status code.
1761 * @param pVCpu The cross context virtual CPU structure of the
1762 * calling thread.
1763 * @param cbMin The minimum number of bytes, relative to offOpcode,
1764 * that must be read.
1765 */
1766IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1767{
1768 /*
1769 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1770 *
1771 * First translate CS:rIP to a physical address.
1772 */
1773 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1774 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1775 uint32_t cbToTryRead;
1776 RTGCPTR GCPtrNext;
1777 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1778 {
1779 cbToTryRead = PAGE_SIZE;
1780 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1781 if (!IEM_IS_CANONICAL(GCPtrNext))
1782 return iemRaiseGeneralProtectionFault0(pVCpu);
1783 }
1784 else
1785 {
1786 uint32_t GCPtrNext32 = pCtx->eip;
1787 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1788 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1789 if (GCPtrNext32 > pCtx->cs.u32Limit)
1790 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1791 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1792 if (!cbToTryRead) /* overflowed */
1793 {
1794 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1795 cbToTryRead = UINT32_MAX;
1796 /** @todo check out wrapping around the code segment. */
1797 }
1798 if (cbToTryRead < cbMin - cbLeft)
1799 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1800 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1801 }
1802
1803 /* Only read up to the end of the page, and make sure we don't read more
1804 than the opcode buffer can hold. */
1805 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1806 if (cbToTryRead > cbLeftOnPage)
1807 cbToTryRead = cbLeftOnPage;
1808 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1809 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1810/** @todo r=bird: Convert assertion into undefined opcode exception? */
1811 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
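/*
 * Worked example with made-up numbers: if GCPtrNext ends in 0xffa, only
 * 6 bytes (0x1000 - 0xffa) remain on the page, so cbToTryRead is clamped to
 * 6; and if, say, the opcode buffer held 16 bytes of which cbOpcode = 12
 * were already used, the second clamp would further limit the read to 4.
 */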
1812
1813# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1814 /* Allow interpretation of patch manager code blocks since they can for
1815 instance throw #PFs for perfectly good reasons. */
1816 if (pVCpu->iem.s.fInPatchCode)
1817 {
1818 size_t cbRead = 0;
1819 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1820 AssertRCReturn(rc, rc);
1821 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1822 return VINF_SUCCESS;
1823 }
1824# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1825
1826 RTGCPHYS GCPhys;
1827 uint64_t fFlags;
1828 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1829 if (RT_FAILURE(rc))
1830 {
1831 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1832 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1833 }
1834 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1835 {
1836 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1837 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1838 }
1839 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1840 {
1841 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1842 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1843 }
1844 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1845 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1846 /** @todo Check reserved bits and such stuff. PGM is better at doing
1847 * that, so do it when implementing the guest virtual address
1848 * TLB... */
1849
1850 /*
1851 * Read the bytes at this address.
1852 *
1853 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1854 * and since PATM should only patch the start of an instruction there
1855 * should be no need to check again here.
1856 */
1857 if (!pVCpu->iem.s.fBypassHandlers)
1858 {
1859 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1860 cbToTryRead, PGMACCESSORIGIN_IEM);
1861 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1862 { /* likely */ }
1863 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1864 {
1865 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1866 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1867 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1868 }
1869 else
1870 {
1871 Log((RT_SUCCESS(rcStrict)
1872 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1873 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1874 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1875 return rcStrict;
1876 }
1877 }
1878 else
1879 {
1880 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1881 if (RT_SUCCESS(rc))
1882 { /* likely */ }
1883 else
1884 {
1885 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1886 return rc;
1887 }
1888 }
1889 pVCpu->iem.s.cbOpcode += cbToTryRead;
1890 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1891
1892 return VINF_SUCCESS;
1893}
1894
1895#endif /* !IEM_WITH_CODE_TLB */
1896#ifndef IEM_WITH_SETJMP
1897
1898/**
1899 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1900 *
1901 * @returns Strict VBox status code.
1902 * @param pVCpu The cross context virtual CPU structure of the
1903 * calling thread.
1904 * @param pb Where to return the opcode byte.
1905 */
1906DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1907{
1908 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1909 if (rcStrict == VINF_SUCCESS)
1910 {
1911 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1912 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1913 pVCpu->iem.s.offOpcode = offOpcode + 1;
1914 }
1915 else
1916 *pb = 0;
1917 return rcStrict;
1918}
1919
1920
1921/**
1922 * Fetches the next opcode byte.
1923 *
1924 * @returns Strict VBox status code.
1925 * @param pVCpu The cross context virtual CPU structure of the
1926 * calling thread.
1927 * @param pu8 Where to return the opcode byte.
1928 */
1929DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1930{
1931 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1932 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1933 {
1934 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1935 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1936 return VINF_SUCCESS;
1937 }
1938 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1939}
1940
1941#else /* IEM_WITH_SETJMP */
1942
1943/**
1944 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1945 *
1946 * @returns The opcode byte.
1947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1948 */
1949DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1950{
1951# ifdef IEM_WITH_CODE_TLB
1952 uint8_t u8;
1953 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1954 return u8;
1955# else
1956 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1957 if (rcStrict == VINF_SUCCESS)
1958 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1959 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1960# endif
1961}
1962
1963
1964/**
1965 * Fetches the next opcode byte, longjmp on error.
1966 *
1967 * @returns The opcode byte.
1968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1969 */
1970DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1971{
1972# ifdef IEM_WITH_CODE_TLB
1973 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1974 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1975 if (RT_LIKELY( pbBuf != NULL
1976 && offBuf < pVCpu->iem.s.cbInstrBuf))
1977 {
1978 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1979 return pbBuf[offBuf];
1980 }
1981# else
1982 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1983 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1984 {
1985 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1986 return pVCpu->iem.s.abOpcode[offOpcode];
1987 }
1988# endif
1989 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1990}
1991
1992#endif /* IEM_WITH_SETJMP */
1993
1994/**
1995 * Fetches the next opcode byte, returns automatically on failure.
1996 *
1997 * @param a_pu8 Where to return the opcode byte.
1998 * @remark Implicitly references pVCpu.
1999 */
2000#ifndef IEM_WITH_SETJMP
2001# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2002 do \
2003 { \
2004 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2005 if (rcStrict2 == VINF_SUCCESS) \
2006 { /* likely */ } \
2007 else \
2008 return rcStrict2; \
2009 } while (0)
2010#else
2011# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2012#endif /* IEM_WITH_SETJMP */
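
/*
 * Illustrative usage sketch (hypothetical decoder snippet, not an actual
 * instruction implementation): both build variants are written against the
 * same macro, e.g.
 *
 *     IEM_STATIC VBOXSTRICTRC iemOpExample_DecodeImm8(PVMCPU pVCpu)
 *     {
 *         uint8_t bImm;
 *         IEM_OPCODE_GET_NEXT_U8(&bImm);
 *         // ... use bImm ...
 *         return VINF_SUCCESS;
 *     }
 *
 * In the status-code build the macro returns from the caller on failure,
 * while in the setjmp build iemOpcodeGetNextU8Jmp() longjmps straight to the
 * outer dispatcher, so the caller needs no explicit error path.
 */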
2013
2014
2015#ifndef IEM_WITH_SETJMP
2016/**
2017 * Fetches the next signed byte from the opcode stream.
2018 *
2019 * @returns Strict VBox status code.
2020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2021 * @param pi8 Where to return the signed byte.
2022 */
2023DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2024{
2025 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2026}
2027#endif /* !IEM_WITH_SETJMP */
2028
2029
2030/**
2031 * Fetches the next signed byte from the opcode stream, returning automatically
2032 * on failure.
2033 *
2034 * @param a_pi8 Where to return the signed byte.
2035 * @remark Implicitly references pVCpu.
2036 */
2037#ifndef IEM_WITH_SETJMP
2038# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2039 do \
2040 { \
2041 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2042 if (rcStrict2 != VINF_SUCCESS) \
2043 return rcStrict2; \
2044 } while (0)
2045#else /* IEM_WITH_SETJMP */
2046# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2047
2048#endif /* IEM_WITH_SETJMP */
2049
2050#ifndef IEM_WITH_SETJMP
2051
2052/**
2053 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2054 *
2055 * @returns Strict VBox status code.
2056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2057 * @param pu16 Where to return the opcode word.
2058 */
2059DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2060{
2061 uint8_t u8;
2062 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2063 if (rcStrict == VINF_SUCCESS)
2064 *pu16 = (int8_t)u8;
2065 return rcStrict;
2066}
2067
2068
2069/**
2070 * Fetches the next signed byte from the opcode stream, extending it to
2071 * unsigned 16-bit.
2072 *
2073 * @returns Strict VBox status code.
2074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2075 * @param pu16 Where to return the unsigned word.
2076 */
2077DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2078{
2079 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2080 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2081 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2082
2083 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2084 pVCpu->iem.s.offOpcode = offOpcode + 1;
2085 return VINF_SUCCESS;
2086}
2087
2088#endif /* !IEM_WITH_SETJMP */
2089
2090/**
2091 * Fetches the next signed byte from the opcode stream, sign-extending it to
2092 * a word, returning automatically on failure.
2093 *
2094 * @param a_pu16 Where to return the word.
2095 * @remark Implicitly references pVCpu.
2096 */
2097#ifndef IEM_WITH_SETJMP
2098# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2099 do \
2100 { \
2101 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2102 if (rcStrict2 != VINF_SUCCESS) \
2103 return rcStrict2; \
2104 } while (0)
2105#else
2106# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2107#endif
2108
2109#ifndef IEM_WITH_SETJMP
2110
2111/**
2112 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2113 *
2114 * @returns Strict VBox status code.
2115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2116 * @param pu32 Where to return the opcode dword.
2117 */
2118DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2119{
2120 uint8_t u8;
2121 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2122 if (rcStrict == VINF_SUCCESS)
2123 *pu32 = (int8_t)u8;
2124 return rcStrict;
2125}
2126
2127
2128/**
2129 * Fetches the next signed byte from the opcode stream, extending it to
2130 * unsigned 32-bit.
2131 *
2132 * @returns Strict VBox status code.
2133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2134 * @param pu32 Where to return the unsigned dword.
2135 */
2136DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2137{
2138 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2139 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2140 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2141
2142 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2143 pVCpu->iem.s.offOpcode = offOpcode + 1;
2144 return VINF_SUCCESS;
2145}
2146
2147#endif /* !IEM_WITH_SETJMP */
2148
2149/**
2150 * Fetches the next signed byte from the opcode stream, sign-extending it to
2151 * a double word, returning automatically on failure.
2152 *
2153 * @param a_pu32 Where to return the double word.
2154 * @remark Implicitly references pVCpu.
2155 */
2156#ifndef IEM_WITH_SETJMP
2157# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2158 do \
2159 { \
2160 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2161 if (rcStrict2 != VINF_SUCCESS) \
2162 return rcStrict2; \
2163 } while (0)
2164#else
2165# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2166#endif
2167
2168#ifndef IEM_WITH_SETJMP
2169
2170/**
2171 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2172 *
2173 * @returns Strict VBox status code.
2174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2175 * @param pu64 Where to return the opcode qword.
2176 */
2177DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2178{
2179 uint8_t u8;
2180 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2181 if (rcStrict == VINF_SUCCESS)
2182 *pu64 = (int8_t)u8;
2183 return rcStrict;
2184}
2185
2186
2187/**
2188 * Fetches the next signed byte from the opcode stream, extending it to
2189 * unsigned 64-bit.
2190 *
2191 * @returns Strict VBox status code.
2192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2193 * @param pu64 Where to return the unsigned qword.
2194 */
2195DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2196{
2197 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2198 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2199 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2200
2201 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2202 pVCpu->iem.s.offOpcode = offOpcode + 1;
2203 return VINF_SUCCESS;
2204}
2205
2206#endif /* !IEM_WITH_SETJMP */
2207
2208
2209/**
2210 * Fetches the next signed byte from the opcode stream, sign-extending it to
2211 * a quad word, returning automatically on failure.
2212 *
2213 * @param a_pu64 Where to return the quad word.
2214 * @remark Implicitly references pVCpu.
2215 */
2216#ifndef IEM_WITH_SETJMP
2217# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2218 do \
2219 { \
2220 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2221 if (rcStrict2 != VINF_SUCCESS) \
2222 return rcStrict2; \
2223 } while (0)
2224#else
2225# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2226#endif
2227
2228
2229#ifndef IEM_WITH_SETJMP
2230
2231/**
2232 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2233 *
2234 * @returns Strict VBox status code.
2235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2236 * @param pu16 Where to return the opcode word.
2237 */
2238DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2239{
2240 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2241 if (rcStrict == VINF_SUCCESS)
2242 {
2243 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2244# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2245 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2246# else
2247 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2248# endif
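 /* Both paths assemble the immediate little endian, as it is stored in the
 x86 opcode stream: RT_MAKE_U16(lo, hi) does so explicitly, while the
 IEM_USE_UNALIGNED_DATA_ACCESS variant relies on the host being little
 endian and tolerating misaligned reads. */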
2249 pVCpu->iem.s.offOpcode = offOpcode + 2;
2250 }
2251 else
2252 *pu16 = 0;
2253 return rcStrict;
2254}
2255
2256
2257/**
2258 * Fetches the next opcode word.
2259 *
2260 * @returns Strict VBox status code.
2261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2262 * @param pu16 Where to return the opcode word.
2263 */
2264DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2265{
2266 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2267 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2268 {
2269 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2270# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2271 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2272# else
2273 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2274# endif
2275 return VINF_SUCCESS;
2276 }
2277 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2278}
2279
2280#else /* IEM_WITH_SETJMP */
2281
2282/**
2283 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2284 *
2285 * @returns The opcode word.
2286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2287 */
2288DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2289{
2290# ifdef IEM_WITH_CODE_TLB
2291 uint16_t u16;
2292 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2293 return u16;
2294# else
2295 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2296 if (rcStrict == VINF_SUCCESS)
2297 {
2298 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2299 pVCpu->iem.s.offOpcode += 2;
2300# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2301 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2302# else
2303 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2304# endif
2305 }
2306 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2307# endif
2308}
2309
2310
2311/**
2312 * Fetches the next opcode word, longjmp on error.
2313 *
2314 * @returns The opcode word.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 */
2317DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2318{
2319# ifdef IEM_WITH_CODE_TLB
2320 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2321 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2322 if (RT_LIKELY( pbBuf != NULL
2323 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2324 {
2325 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2326# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2327 return *(uint16_t const *)&pbBuf[offBuf];
2328# else
2329 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2330# endif
2331 }
2332# else
2333 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2334 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2335 {
2336 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2337# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2338 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2339# else
2340 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2341# endif
2342 }
2343# endif
2344 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2345}
2346
2347#endif /* IEM_WITH_SETJMP */
2348
2349
2350/**
2351 * Fetches the next opcode word, returns automatically on failure.
2352 *
2353 * @param a_pu16 Where to return the opcode word.
2354 * @remark Implicitly references pVCpu.
2355 */
2356#ifndef IEM_WITH_SETJMP
2357# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2358 do \
2359 { \
2360 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2361 if (rcStrict2 != VINF_SUCCESS) \
2362 return rcStrict2; \
2363 } while (0)
2364#else
2365# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2366#endif
2367
2368#ifndef IEM_WITH_SETJMP
2369
2370/**
2371 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2372 *
2373 * @returns Strict VBox status code.
2374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2375 * @param pu32 Where to return the opcode double word.
2376 */
2377DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2378{
2379 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2380 if (rcStrict == VINF_SUCCESS)
2381 {
2382 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2383 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2384 pVCpu->iem.s.offOpcode = offOpcode + 2;
2385 }
2386 else
2387 *pu32 = 0;
2388 return rcStrict;
2389}
2390
2391
2392/**
2393 * Fetches the next opcode word, zero extending it to a double word.
2394 *
2395 * @returns Strict VBox status code.
2396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2397 * @param pu32 Where to return the opcode double word.
2398 */
2399DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2400{
2401 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2402 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2403 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2404
2405 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2406 pVCpu->iem.s.offOpcode = offOpcode + 2;
2407 return VINF_SUCCESS;
2408}
2409
2410#endif /* !IEM_WITH_SETJMP */
2411
2412
2413/**
2414 * Fetches the next opcode word and zero extends it to a double word, returns
2415 * automatically on failure.
2416 *
2417 * @param a_pu32 Where to return the opcode double word.
2418 * @remark Implicitly references pVCpu.
2419 */
2420#ifndef IEM_WITH_SETJMP
2421# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2422 do \
2423 { \
2424 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2425 if (rcStrict2 != VINF_SUCCESS) \
2426 return rcStrict2; \
2427 } while (0)
2428#else
2429# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2430#endif
2431
2432#ifndef IEM_WITH_SETJMP
2433
2434/**
2435 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2436 *
2437 * @returns Strict VBox status code.
2438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2439 * @param pu64 Where to return the opcode quad word.
2440 */
2441DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2442{
2443 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2444 if (rcStrict == VINF_SUCCESS)
2445 {
2446 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2447 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2448 pVCpu->iem.s.offOpcode = offOpcode + 2;
2449 }
2450 else
2451 *pu64 = 0;
2452 return rcStrict;
2453}
2454
2455
2456/**
2457 * Fetches the next opcode word, zero extending it to a quad word.
2458 *
2459 * @returns Strict VBox status code.
2460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2461 * @param pu64 Where to return the opcode quad word.
2462 */
2463DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2464{
2465 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2466 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2467 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2468
2469 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2470 pVCpu->iem.s.offOpcode = offOpcode + 2;
2471 return VINF_SUCCESS;
2472}
2473
2474#endif /* !IEM_WITH_SETJMP */
2475
2476/**
2477 * Fetches the next opcode word and zero extends it to a quad word, returns
2478 * automatically on failure.
2479 *
2480 * @param a_pu64 Where to return the opcode quad word.
2481 * @remark Implicitly references pVCpu.
2482 */
2483#ifndef IEM_WITH_SETJMP
2484# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2485 do \
2486 { \
2487 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2488 if (rcStrict2 != VINF_SUCCESS) \
2489 return rcStrict2; \
2490 } while (0)
2491#else
2492# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2493#endif
2494
2495
2496#ifndef IEM_WITH_SETJMP
2497/**
2498 * Fetches the next signed word from the opcode stream.
2499 *
2500 * @returns Strict VBox status code.
2501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2502 * @param pi16 Where to return the signed word.
2503 */
2504DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2505{
2506 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2507}
2508#endif /* !IEM_WITH_SETJMP */
2509
2510
2511/**
2512 * Fetches the next signed word from the opcode stream, returning automatically
2513 * on failure.
2514 *
2515 * @param a_pi16 Where to return the signed word.
2516 * @remark Implicitly references pVCpu.
2517 */
2518#ifndef IEM_WITH_SETJMP
2519# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2520 do \
2521 { \
2522 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2523 if (rcStrict2 != VINF_SUCCESS) \
2524 return rcStrict2; \
2525 } while (0)
2526#else
2527# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2528#endif
2529
2530#ifndef IEM_WITH_SETJMP
2531
2532/**
2533 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2534 *
2535 * @returns Strict VBox status code.
2536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2537 * @param pu32 Where to return the opcode dword.
2538 */
2539DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2540{
2541 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2542 if (rcStrict == VINF_SUCCESS)
2543 {
2544 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2545# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2546 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2547# else
2548 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2549 pVCpu->iem.s.abOpcode[offOpcode + 1],
2550 pVCpu->iem.s.abOpcode[offOpcode + 2],
2551 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2552# endif
2553 pVCpu->iem.s.offOpcode = offOpcode + 4;
2554 }
2555 else
2556 *pu32 = 0;
2557 return rcStrict;
2558}
2559
2560
2561/**
2562 * Fetches the next opcode dword.
2563 *
2564 * @returns Strict VBox status code.
2565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2566 * @param pu32 Where to return the opcode double word.
2567 */
2568DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2569{
2570 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2571 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2572 {
2573 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2574# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2575 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2576# else
2577 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2578 pVCpu->iem.s.abOpcode[offOpcode + 1],
2579 pVCpu->iem.s.abOpcode[offOpcode + 2],
2580 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2581# endif
2582 return VINF_SUCCESS;
2583 }
2584 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2585}
2586
2587#else /* IEM_WITH_SETJMP */
2588
2589/**
2590 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2591 *
2592 * @returns The opcode dword.
2593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2594 */
2595DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2596{
2597# ifdef IEM_WITH_CODE_TLB
2598 uint32_t u32;
2599 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2600 return u32;
2601# else
2602 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2603 if (rcStrict == VINF_SUCCESS)
2604 {
2605 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2606 pVCpu->iem.s.offOpcode = offOpcode + 4;
2607# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2608 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2609# else
2610 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2611 pVCpu->iem.s.abOpcode[offOpcode + 1],
2612 pVCpu->iem.s.abOpcode[offOpcode + 2],
2613 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2614# endif
2615 }
2616 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2617# endif
2618}
2619
2620
2621/**
2622 * Fetches the next opcode dword, longjmp on error.
2623 *
2624 * @returns The opcode dword.
2625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2626 */
2627DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2628{
2629# ifdef IEM_WITH_CODE_TLB
2630 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2631 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2632 if (RT_LIKELY( pbBuf != NULL
2633 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2634 {
2635 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2636# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2637 return *(uint32_t const *)&pbBuf[offBuf];
2638# else
2639 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2640 pbBuf[offBuf + 1],
2641 pbBuf[offBuf + 2],
2642 pbBuf[offBuf + 3]);
2643# endif
2644 }
2645# else
2646 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2647 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2648 {
2649 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2650# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2651 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2652# else
2653 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2654 pVCpu->iem.s.abOpcode[offOpcode + 1],
2655 pVCpu->iem.s.abOpcode[offOpcode + 2],
2656 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2657# endif
2658 }
2659# endif
2660 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2661}
2662
2663#endif /* IEM_WITH_SETJMP */
2664
2665
2666/**
2667 * Fetches the next opcode dword, returns automatically on failure.
2668 *
2669 * @param a_pu32 Where to return the opcode dword.
2670 * @remark Implicitly references pVCpu.
2671 */
2672#ifndef IEM_WITH_SETJMP
2673# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2674 do \
2675 { \
2676 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2677 if (rcStrict2 != VINF_SUCCESS) \
2678 return rcStrict2; \
2679 } while (0)
2680#else
2681# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2682#endif
2683
2684#ifndef IEM_WITH_SETJMP
2685
2686/**
2687 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2688 *
2689 * @returns Strict VBox status code.
2690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2691 * @param pu64 Where to return the opcode quad word.
2692 */
2693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2694{
2695 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2696 if (rcStrict == VINF_SUCCESS)
2697 {
2698 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2699 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2700 pVCpu->iem.s.abOpcode[offOpcode + 1],
2701 pVCpu->iem.s.abOpcode[offOpcode + 2],
2702 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2703 pVCpu->iem.s.offOpcode = offOpcode + 4;
2704 }
2705 else
2706 *pu64 = 0;
2707 return rcStrict;
2708}
2709
2710
2711/**
2712 * Fetches the next opcode dword, zero extending it to a quad word.
2713 *
2714 * @returns Strict VBox status code.
2715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2716 * @param pu64 Where to return the opcode quad word.
2717 */
2718DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2719{
2720 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2721 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2722 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2723
2724 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2725 pVCpu->iem.s.abOpcode[offOpcode + 1],
2726 pVCpu->iem.s.abOpcode[offOpcode + 2],
2727 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2728 pVCpu->iem.s.offOpcode = offOpcode + 4;
2729 return VINF_SUCCESS;
2730}
2731
2732#endif /* !IEM_WITH_SETJMP */
2733
2734
2735/**
2736 * Fetches the next opcode dword and zero extends it to a quad word, returns
2737 * automatically on failure.
2738 *
2739 * @param a_pu64 Where to return the opcode quad word.
2740 * @remark Implicitly references pVCpu.
2741 */
2742#ifndef IEM_WITH_SETJMP
2743# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2744 do \
2745 { \
2746 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2747 if (rcStrict2 != VINF_SUCCESS) \
2748 return rcStrict2; \
2749 } while (0)
2750#else
2751# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2752#endif
2753
2754
2755#ifndef IEM_WITH_SETJMP
2756/**
2757 * Fetches the next signed double word from the opcode stream.
2758 *
2759 * @returns Strict VBox status code.
2760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2761 * @param pi32 Where to return the signed double word.
2762 */
2763DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2764{
2765 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2766}
2767#endif
2768
2769/**
2770 * Fetches the next signed double word from the opcode stream, returning
2771 * automatically on failure.
2772 *
2773 * @param a_pi32 Where to return the signed double word.
2774 * @remark Implicitly references pVCpu.
2775 */
2776#ifndef IEM_WITH_SETJMP
2777# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2778 do \
2779 { \
2780 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2781 if (rcStrict2 != VINF_SUCCESS) \
2782 return rcStrict2; \
2783 } while (0)
2784#else
2785# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2786#endif
2787
2788#ifndef IEM_WITH_SETJMP
2789
2790/**
2791 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2792 *
2793 * @returns Strict VBox status code.
2794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2795 * @param pu64 Where to return the opcode qword.
2796 */
2797DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2798{
2799 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2800 if (rcStrict == VINF_SUCCESS)
2801 {
2802 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2803 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2804 pVCpu->iem.s.abOpcode[offOpcode + 1],
2805 pVCpu->iem.s.abOpcode[offOpcode + 2],
2806 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2807 pVCpu->iem.s.offOpcode = offOpcode + 4;
2808 }
2809 else
2810 *pu64 = 0;
2811 return rcStrict;
2812}
2813
2814
2815/**
2816 * Fetches the next opcode dword, sign extending it into a quad word.
2817 *
2818 * @returns Strict VBox status code.
2819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2820 * @param pu64 Where to return the opcode quad word.
2821 */
2822DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2823{
2824 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2825 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2826 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2827
2828 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2829 pVCpu->iem.s.abOpcode[offOpcode + 1],
2830 pVCpu->iem.s.abOpcode[offOpcode + 2],
2831 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2832 *pu64 = i32;
2833 pVCpu->iem.s.offOpcode = offOpcode + 4;
2834 return VINF_SUCCESS;
2835}
2836
2837#endif /* !IEM_WITH_SETJMP */
2838
2839
2840/**
2841 * Fetches the next opcode double word and sign extends it to a quad word,
2842 * returns automatically on failure.
2843 *
2844 * @param a_pu64 Where to return the opcode quad word.
2845 * @remark Implicitly references pVCpu.
2846 */
2847#ifndef IEM_WITH_SETJMP
2848# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2849 do \
2850 { \
2851 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2852 if (rcStrict2 != VINF_SUCCESS) \
2853 return rcStrict2; \
2854 } while (0)
2855#else
2856# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2857#endif
2858
2859#ifndef IEM_WITH_SETJMP
2860
2861/**
2862 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2863 *
2864 * @returns Strict VBox status code.
2865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2866 * @param pu64 Where to return the opcode qword.
2867 */
2868DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2869{
2870 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2871 if (rcStrict == VINF_SUCCESS)
2872 {
2873 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2874# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2875 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2876# else
2877 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2878 pVCpu->iem.s.abOpcode[offOpcode + 1],
2879 pVCpu->iem.s.abOpcode[offOpcode + 2],
2880 pVCpu->iem.s.abOpcode[offOpcode + 3],
2881 pVCpu->iem.s.abOpcode[offOpcode + 4],
2882 pVCpu->iem.s.abOpcode[offOpcode + 5],
2883 pVCpu->iem.s.abOpcode[offOpcode + 6],
2884 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2885# endif
2886 pVCpu->iem.s.offOpcode = offOpcode + 8;
2887 }
2888 else
2889 *pu64 = 0;
2890 return rcStrict;
2891}
2892
2893
2894/**
2895 * Fetches the next opcode qword.
2896 *
2897 * @returns Strict VBox status code.
2898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2899 * @param pu64 Where to return the opcode qword.
2900 */
2901DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2902{
2903 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2904 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2905 {
2906# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2907 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2908# else
2909 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2910 pVCpu->iem.s.abOpcode[offOpcode + 1],
2911 pVCpu->iem.s.abOpcode[offOpcode + 2],
2912 pVCpu->iem.s.abOpcode[offOpcode + 3],
2913 pVCpu->iem.s.abOpcode[offOpcode + 4],
2914 pVCpu->iem.s.abOpcode[offOpcode + 5],
2915 pVCpu->iem.s.abOpcode[offOpcode + 6],
2916 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2917# endif
2918 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2919 return VINF_SUCCESS;
2920 }
2921 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2922}
2923
2924#else /* IEM_WITH_SETJMP */
2925
2926/**
2927 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2928 *
2929 * @returns The opcode qword.
2930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2931 */
2932DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2933{
2934# ifdef IEM_WITH_CODE_TLB
2935 uint64_t u64;
2936 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2937 return u64;
2938# else
2939 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2940 if (rcStrict == VINF_SUCCESS)
2941 {
2942 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2943 pVCpu->iem.s.offOpcode = offOpcode + 8;
2944# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2945 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2946# else
2947 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2948 pVCpu->iem.s.abOpcode[offOpcode + 1],
2949 pVCpu->iem.s.abOpcode[offOpcode + 2],
2950 pVCpu->iem.s.abOpcode[offOpcode + 3],
2951 pVCpu->iem.s.abOpcode[offOpcode + 4],
2952 pVCpu->iem.s.abOpcode[offOpcode + 5],
2953 pVCpu->iem.s.abOpcode[offOpcode + 6],
2954 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2955# endif
2956 }
2957 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2958# endif
2959}
2960
2961
2962/**
2963 * Fetches the next opcode qword, longjmp on error.
2964 *
2965 * @returns The opcode qword.
2966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2967 */
2968DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2969{
2970# ifdef IEM_WITH_CODE_TLB
2971 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2972 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2973 if (RT_LIKELY( pbBuf != NULL
2974 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2975 {
2976 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2977# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2978 return *(uint64_t const *)&pbBuf[offBuf];
2979# else
2980 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2981 pbBuf[offBuf + 1],
2982 pbBuf[offBuf + 2],
2983 pbBuf[offBuf + 3],
2984 pbBuf[offBuf + 4],
2985 pbBuf[offBuf + 5],
2986 pbBuf[offBuf + 6],
2987 pbBuf[offBuf + 7]);
2988# endif
2989 }
2990# else
2991 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2992 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2993 {
2994 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2995# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2996 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2997# else
2998 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2999 pVCpu->iem.s.abOpcode[offOpcode + 1],
3000 pVCpu->iem.s.abOpcode[offOpcode + 2],
3001 pVCpu->iem.s.abOpcode[offOpcode + 3],
3002 pVCpu->iem.s.abOpcode[offOpcode + 4],
3003 pVCpu->iem.s.abOpcode[offOpcode + 5],
3004 pVCpu->iem.s.abOpcode[offOpcode + 6],
3005 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3006# endif
3007 }
3008# endif
3009 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3010}
3011
3012#endif /* IEM_WITH_SETJMP */
3013
3014/**
3015 * Fetches the next opcode quad word, returns automatically on failure.
3016 *
3017 * @param a_pu64 Where to return the opcode quad word.
3018 * @remark Implicitly references pVCpu.
3019 */
3020#ifndef IEM_WITH_SETJMP
3021# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3022 do \
3023 { \
3024 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3025 if (rcStrict2 != VINF_SUCCESS) \
3026 return rcStrict2; \
3027 } while (0)
3028#else
3029# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3030#endif
3031
3032
3033/** @name Misc Worker Functions.
3034 * @{
3035 */
3036
3037
3038/**
3039 * Validates a new SS segment.
3040 *
3041 * @returns VBox strict status code.
3042 * @param pVCpu The cross context virtual CPU structure of the
3043 * calling thread.
3044 * @param pCtx The CPU context.
3045 * @param NewSS The new SS selector.
3046 * @param uCpl The CPL to load the stack for.
3047 * @param pDesc Where to return the descriptor.
3048 */
3049IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3050{
3051 NOREF(pCtx);
3052
3053 /* Null selectors are not allowed (we're not called for dispatching
3054 interrupts with SS=0 in long mode). */
3055 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3056 {
3057 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3058 return iemRaiseTaskSwitchFault0(pVCpu);
3059 }
3060
3061 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3062 if ((NewSS & X86_SEL_RPL) != uCpl)
3063 {
3064 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3065 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3066 }
3067
3068 /*
3069 * Read the descriptor.
3070 */
3071 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3072 if (rcStrict != VINF_SUCCESS)
3073 return rcStrict;
3074
3075 /*
3076 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3077 */
3078 if (!pDesc->Legacy.Gen.u1DescType)
3079 {
3080 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3081 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3082 }
3083
3084 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3085 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3086 {
3087 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3088 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3089 }
3090 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3091 {
3092 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3093 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3094 }
3095
3096 /* Is it there? */
3097 /** @todo testcase: Is this checked before the canonical / limit check below? */
3098 if (!pDesc->Legacy.Gen.u1Present)
3099 {
3100 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3101 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3102 }
3103
3104 return VINF_SUCCESS;
3105}
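
/*
 * Illustrative call sequence (hypothetical, heavily abridged): a stack load
 * such as LSS, POP SS or MOV SS would use the helper roughly like this:
 *
 *     IEMSELDESC DescSS;
 *     VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, uNewSS, pVCpu->iem.s.uCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... mark the descriptor accessed and commit the new SS:(E)SP from DescSS ...
 *
 * I.e. the helper only validates and fetches the descriptor; committing the
 * new stack segment is left to the caller.
 */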
3106
3107
3108/**
3109 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3110 * not.
3111 *
3112 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param a_pCtx The CPU context.
3114 */
3115#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3116# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3117 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3118 ? (a_pCtx)->eflags.u \
3119 : CPUMRawGetEFlags(a_pVCpu) )
3120#else
3121# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3122 ( (a_pCtx)->eflags.u )
3123#endif
3124
3125/**
3126 * Updates the EFLAGS in the correct manner wrt. PATM.
3127 *
3128 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3129 * @param a_pCtx The CPU context.
3130 * @param a_fEfl The new EFLAGS.
3131 */
3132#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3133# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3134 do { \
3135 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3136 (a_pCtx)->eflags.u = (a_fEfl); \
3137 else \
3138 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3139 } while (0)
3140#else
3141# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3142 do { \
3143 (a_pCtx)->eflags.u = (a_fEfl); \
3144 } while (0)
3145#endif
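
/*
 * Illustrative usage sketch (hypothetical snippet): EFLAGS accesses are meant
 * to go through the two macros above so that raw-mode builds route via
 * CPUMRawGetEFlags/CPUMRawSetEFlags when required, e.g.:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl |= X86_EFL_CF;
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */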
3146
3147
3148/** @} */
3149
3150/** @name Raising Exceptions.
3151 *
3152 * @{
3153 */
3154
3155/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3156 * @{ */
3157/** CPU exception. */
3158#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3159/** External interrupt (from PIC, APIC, whatever). */
3160#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3161/** Software interrupt (int or into, not bound).
3162 * Returns to the following instruction */
3163#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3164/** Takes an error code. */
3165#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3166/** Takes a CR2. */
3167#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3168/** Generated by the breakpoint instruction. */
3169#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3170/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3171#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3172/** @} */
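    /* Illustrative only (the real raisers live elsewhere in this file): a page fault would be
       raised roughly along the lines of
           iemRaiseXcptOrInt(pVCpu, 0 /*cbInstr*/, X86_XCPT_PF,
                             IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
                             uErr, uCr2);
       while a software INT n uses IEM_XCPT_FLAGS_T_SOFT_INT and carries neither an error code nor CR2. */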
3173
3174
3175/**
3176 * Loads the specified stack far pointer from the TSS.
3177 *
3178 * @returns VBox strict status code.
3179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3180 * @param pCtx The CPU context.
3181 * @param uCpl The CPL to load the stack for.
3182 * @param pSelSS Where to return the new stack segment.
3183 * @param puEsp Where to return the new stack pointer.
3184 */
3185IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3186 PRTSEL pSelSS, uint32_t *puEsp)
3187{
3188 VBOXSTRICTRC rcStrict;
3189 Assert(uCpl < 4);
3190
3191 switch (pCtx->tr.Attr.n.u4Type)
3192 {
3193 /*
3194 * 16-bit TSS (X86TSS16).
3195 */
3196 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
3197 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3198 {
3199 uint32_t off = uCpl * 4 + 2;
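                /* X86TSS16 stores the ring stacks as 2-byte SP / 2-byte SS pairs starting at offset 2
                   (sp0/ss0, sp1/ss1, sp2/ss2), so the entry for CPL n lives at 2 + n * 4 and the single
                   32-bit read below yields SP in the low word and SS in the high word. */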
3200 if (off + 4 <= pCtx->tr.u32Limit)
3201 {
3202 /** @todo check actual access pattern here. */
3203 uint32_t u32Tmp = 0; /* gcc maybe... */
3204 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3205 if (rcStrict == VINF_SUCCESS)
3206 {
3207 *puEsp = RT_LOWORD(u32Tmp);
3208 *pSelSS = RT_HIWORD(u32Tmp);
3209 return VINF_SUCCESS;
3210 }
3211 }
3212 else
3213 {
3214 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3215 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3216 }
3217 break;
3218 }
3219
3220 /*
3221 * 32-bit TSS (X86TSS32).
3222 */
3223 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
3224 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3225 {
3226 uint32_t off = uCpl * 8 + 4;
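                /* X86TSS32 stores the ring stacks as 4-byte ESP / 4-byte SS (16-bit selector, padded)
                   pairs starting at offset 4, so the entry for CPL n lives at 4 + n * 8 and the single
                   64-bit read below yields ESP in the low dword and SS in bits 32..47. */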
3227 if (off + 7 <= pCtx->tr.u32Limit)
3228 {
3229/** @todo check actual access pattern here. */
3230 uint64_t u64Tmp;
3231 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3232 if (rcStrict == VINF_SUCCESS)
3233 {
3234 *puEsp = u64Tmp & UINT32_MAX;
3235 *pSelSS = (RTSEL)(u64Tmp >> 32);
3236 return VINF_SUCCESS;
3237 }
3238 }
3239 else
3240 {
3241                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3242 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3243 }
3244 break;
3245 }
3246
3247 default:
3248 AssertFailed();
3249 rcStrict = VERR_IEM_IPE_4;
3250 break;
3251 }
3252
3253 *puEsp = 0; /* make gcc happy */
3254 *pSelSS = 0; /* make gcc happy */
3255 return rcStrict;
3256}
3257
3258
3259/**
3260 * Loads the specified stack pointer from the 64-bit TSS.
3261 *
3262 * @returns VBox strict status code.
3263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3264 * @param pCtx The CPU context.
3265 * @param uCpl The CPL to load the stack for.
3266 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3267 * @param puRsp Where to return the new stack pointer.
3268 */
3269IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3270{
3271 Assert(uCpl < 4);
3272 Assert(uIst < 8);
3273 *puRsp = 0; /* make gcc happy */
3274
3275 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3276
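        /* X86TSS64 layout reminder: rsp0/rsp1/rsp2 are 8-byte fields starting at offset 4, and
           ist1..ist7 are 8-byte fields starting at offset 0x24; RT_OFFSETOF below picks the right one. */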
3277 uint32_t off;
3278 if (uIst)
3279 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3280 else
3281 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3282 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3283 {
3284 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3285 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3286 }
3287
3288 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3289}
3290
3291
3292/**
3293 * Adjust the CPU state according to the exception being raised.
3294 *
3295 * @param pCtx The CPU context.
3296 * @param u8Vector The exception that has been raised.
3297 */
3298DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3299{
3300 switch (u8Vector)
3301 {
3302 case X86_XCPT_DB:
3303 pCtx->dr[7] &= ~X86_DR7_GD;
3304 break;
3305 /** @todo Read the AMD and Intel exception reference... */
3306 }
3307}
3308
3309
3310/**
3311 * Implements exceptions and interrupts for real mode.
3312 *
3313 * @returns VBox strict status code.
3314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3315 * @param pCtx The CPU context.
3316 * @param cbInstr The number of bytes to offset rIP by in the return
3317 * address.
3318 * @param u8Vector The interrupt / exception vector number.
3319 * @param fFlags The flags.
3320 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3321 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3322 */
3323IEM_STATIC VBOXSTRICTRC
3324iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3325 PCPUMCTX pCtx,
3326 uint8_t cbInstr,
3327 uint8_t u8Vector,
3328 uint32_t fFlags,
3329 uint16_t uErr,
3330 uint64_t uCr2)
3331{
3332 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3333 NOREF(uErr); NOREF(uCr2);
3334
3335 /*
3336 * Read the IDT entry.
3337 */
3338 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3339 {
3340 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3341 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3342 }
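        /* A real-mode IVT entry is a 4-byte offset:segment far pointer, so vector N lives at
           IDTR.base + N * 4 (hence the *4 scaling and the +3 in the bounds check above). */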
3343 RTFAR16 Idte;
3344 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3345 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3346 return rcStrict;
3347
3348 /*
3349 * Push the stack frame.
3350 */
3351 uint16_t *pu16Frame;
3352 uint64_t uNewRsp;
3353 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3354 if (rcStrict != VINF_SUCCESS)
3355 return rcStrict;
3356
3357 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3358#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3359 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3360 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3361 fEfl |= UINT16_C(0xf000);
3362#endif
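        /* Frame layout: FLAGS, CS, IP - i.e. IP ends up at the lowest address, matching the
           IRET pop order of IP, CS, FLAGS. */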
3363 pu16Frame[2] = (uint16_t)fEfl;
3364 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3365 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3366 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3367 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3368 return rcStrict;
3369
3370 /*
3371 * Load the vector address into cs:ip and make exception specific state
3372 * adjustments.
3373 */
3374 pCtx->cs.Sel = Idte.sel;
3375 pCtx->cs.ValidSel = Idte.sel;
3376 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3377 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3378 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3379 pCtx->rip = Idte.off;
3380 fEfl &= ~X86_EFL_IF;
3381 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3382
3383 /** @todo do we actually do this in real mode? */
3384 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3385 iemRaiseXcptAdjustState(pCtx, u8Vector);
3386
3387 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3388}
3389
3390
3391/**
3392 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3393 *
3394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3395 * @param pSReg Pointer to the segment register.
3396 */
3397IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3398{
3399 pSReg->Sel = 0;
3400 pSReg->ValidSel = 0;
3401 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3402 {
3403 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3404 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3405 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3406 }
3407 else
3408 {
3409 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3410 /** @todo check this on AMD-V */
3411 pSReg->u64Base = 0;
3412 pSReg->u32Limit = 0;
3413 }
3414}
3415
3416
3417/**
3418 * Loads a segment selector during a task switch in V8086 mode.
3419 *
3420 * @param pSReg Pointer to the segment register.
3421 * @param uSel The selector value to load.
3422 */
3423IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3424{
3425 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3426 pSReg->Sel = uSel;
3427 pSReg->ValidSel = uSel;
3428 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3429 pSReg->u64Base = uSel << 4;
3430 pSReg->u32Limit = 0xffff;
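        /* 0xf3 = present, DPL=3, S=1 (code/data), type 3 (read/write data, accessed) - the fixed
           attribute value of a segment in V8086 mode. */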
3431 pSReg->Attr.u = 0xf3;
3432}
3433
3434
3435/**
3436 * Loads a NULL data selector into a selector register, both the hidden and
3437 * visible parts, in protected mode.
3438 *
3439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3440 * @param pSReg Pointer to the segment register.
3441 * @param uRpl The RPL.
3442 */
3443IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3444{
3445     /** @todo Testcase: write a testcase checking what happens when loading a NULL
3446 * data selector in protected mode. */
3447 pSReg->Sel = uRpl;
3448 pSReg->ValidSel = uRpl;
3449 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3450 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3451 {
3452 /* VT-x (Intel 3960x) observed doing something like this. */
3453 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3454 pSReg->u32Limit = UINT32_MAX;
3455 pSReg->u64Base = 0;
3456 }
3457 else
3458 {
3459 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3460 pSReg->u32Limit = 0;
3461 pSReg->u64Base = 0;
3462 }
3463}
3464
3465
3466/**
3467 * Loads a segment selector during a task switch in protected mode.
3468 *
3469 * In this task switch scenario, we would throw \#TS exceptions rather than
3470 * \#GPs.
3471 *
3472 * @returns VBox strict status code.
3473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3474 * @param pSReg Pointer to the segment register.
3475 * @param uSel The new selector value.
3476 *
3477 * @remarks This does _not_ handle CS or SS.
3478 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3479 */
3480IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3481{
3482 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3483
3484 /* Null data selector. */
3485 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3486 {
3487 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3488 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3489 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3490 return VINF_SUCCESS;
3491 }
3492
3493 /* Fetch the descriptor. */
3494 IEMSELDESC Desc;
3495 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3496 if (rcStrict != VINF_SUCCESS)
3497 {
3498 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3499 VBOXSTRICTRC_VAL(rcStrict)));
3500 return rcStrict;
3501 }
3502
3503 /* Must be a data segment or readable code segment. */
3504 if ( !Desc.Legacy.Gen.u1DescType
3505 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3506 {
3507 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3508 Desc.Legacy.Gen.u4Type));
3509 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3510 }
3511
3512 /* Check privileges for data segments and non-conforming code segments. */
3513 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3514 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3515 {
3516 /* The RPL and the new CPL must be less than or equal to the DPL. */
3517 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3518 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3519 {
3520 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3521 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3522 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3523 }
3524 }
3525
3526 /* Is it there? */
3527 if (!Desc.Legacy.Gen.u1Present)
3528 {
3529 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3530 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3531 }
3532
3533 /* The base and limit. */
3534 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3535 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3536
3537 /*
3538 * Ok, everything checked out fine. Now set the accessed bit before
3539 * committing the result into the registers.
3540 */
3541 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3542 {
3543 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3544 if (rcStrict != VINF_SUCCESS)
3545 return rcStrict;
3546 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3547 }
3548
3549 /* Commit */
3550 pSReg->Sel = uSel;
3551 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3552 pSReg->u32Limit = cbLimit;
3553 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3554 pSReg->ValidSel = uSel;
3555 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3556 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3557 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3558
3559 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3560 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3561 return VINF_SUCCESS;
3562}
3563
3564
3565/**
3566 * Performs a task switch.
3567 *
3568 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3569 * caller is responsible for performing the necessary checks (like DPL, TSS
3570 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3571 * reference for JMP, CALL, IRET.
3572 *
3573 * If the task switch is due to a software interrupt or hardware exception,
3574 * the caller is responsible for validating the TSS selector and descriptor. See
3575 * Intel Instruction reference for INT n.
3576 *
3577 * @returns VBox strict status code.
3578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3579 * @param pCtx The CPU context.
3580 * @param enmTaskSwitch What caused this task switch.
3581 * @param uNextEip The EIP effective after the task switch.
3582 * @param fFlags The flags.
3583 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3584 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3585 * @param SelTSS The TSS selector of the new task.
3586 * @param pNewDescTSS Pointer to the new TSS descriptor.
3587 */
3588IEM_STATIC VBOXSTRICTRC
3589iemTaskSwitch(PVMCPU pVCpu,
3590 PCPUMCTX pCtx,
3591 IEMTASKSWITCH enmTaskSwitch,
3592 uint32_t uNextEip,
3593 uint32_t fFlags,
3594 uint16_t uErr,
3595 uint64_t uCr2,
3596 RTSEL SelTSS,
3597 PIEMSELDESC pNewDescTSS)
3598{
3599 Assert(!IEM_IS_REAL_MODE(pVCpu));
3600 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3601
3602 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3603 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3604 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3605 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3606 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3607
3608 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3609 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3610
3611 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3612 fIsNewTSS386, pCtx->eip, uNextEip));
3613
3614 /* Update CR2 in case it's a page-fault. */
3615 /** @todo This should probably be done much earlier in IEM/PGM. See
3616 * @bugref{5653#c49}. */
3617 if (fFlags & IEM_XCPT_FLAGS_CR2)
3618 pCtx->cr2 = uCr2;
3619
3620 /*
3621 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3622 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3623 */
3624 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3625 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3626 if (uNewTSSLimit < uNewTSSLimitMin)
3627 {
3628 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3629 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3630 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3631 }
3632
3633 /*
3634 * Check the current TSS limit. The last written byte to the current TSS during the
3635 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3636 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3637 *
3638      * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3639 * end up with smaller than "legal" TSS limits.
3640 */
3641 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3642 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3643 if (uCurTSSLimit < uCurTSSLimitMin)
3644 {
3645 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3646 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3647 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3648 }
3649
3650 /*
3651 * Verify that the new TSS can be accessed and map it. Map only the required contents
3652 * and not the entire TSS.
3653 */
3654 void *pvNewTSS;
3655 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3656 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3657 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3658 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3659 * not perform correct translation if this happens. See Intel spec. 7.2.1
3660 * "Task-State Segment" */
3661 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3662 if (rcStrict != VINF_SUCCESS)
3663 {
3664 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3665 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3666 return rcStrict;
3667 }
3668
3669 /*
3670 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3671 */
3672 uint32_t u32EFlags = pCtx->eflags.u32;
3673 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3674 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3675 {
3676 PX86DESC pDescCurTSS;
3677 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3678 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3679 if (rcStrict != VINF_SUCCESS)
3680 {
3681             Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3682 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3683 return rcStrict;
3684 }
3685
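            /* Clearing bit 1 of the type turns a busy TSS (286: 3, 386: 11) back into an available
               one (1 resp. 9), releasing the outgoing task's TSS. */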
3686 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3687 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3688 if (rcStrict != VINF_SUCCESS)
3689 {
3690             Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3691 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3692 return rcStrict;
3693 }
3694
3695 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3696 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3697 {
3698 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3699 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3700 u32EFlags &= ~X86_EFL_NT;
3701 }
3702 }
3703
3704 /*
3705 * Save the CPU state into the current TSS.
3706 */
3707 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3708 if (GCPtrNewTSS == GCPtrCurTSS)
3709 {
3710 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3711 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3712 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3713 }
3714 if (fIsNewTSS386)
3715 {
3716 /*
3717 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3718 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3719 */
3720 void *pvCurTSS32;
3721 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3722 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3723 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3724 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3725 if (rcStrict != VINF_SUCCESS)
3726 {
3727 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3728 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3729 return rcStrict;
3730 }
3731
3732         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3733 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3734 pCurTSS32->eip = uNextEip;
3735 pCurTSS32->eflags = u32EFlags;
3736 pCurTSS32->eax = pCtx->eax;
3737 pCurTSS32->ecx = pCtx->ecx;
3738 pCurTSS32->edx = pCtx->edx;
3739 pCurTSS32->ebx = pCtx->ebx;
3740 pCurTSS32->esp = pCtx->esp;
3741 pCurTSS32->ebp = pCtx->ebp;
3742 pCurTSS32->esi = pCtx->esi;
3743 pCurTSS32->edi = pCtx->edi;
3744 pCurTSS32->es = pCtx->es.Sel;
3745 pCurTSS32->cs = pCtx->cs.Sel;
3746 pCurTSS32->ss = pCtx->ss.Sel;
3747 pCurTSS32->ds = pCtx->ds.Sel;
3748 pCurTSS32->fs = pCtx->fs.Sel;
3749 pCurTSS32->gs = pCtx->gs.Sel;
3750
3751 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3752 if (rcStrict != VINF_SUCCESS)
3753 {
3754 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3755 VBOXSTRICTRC_VAL(rcStrict)));
3756 return rcStrict;
3757 }
3758 }
3759 else
3760 {
3761 /*
3762 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3763 */
3764 void *pvCurTSS16;
3765 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3766 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3767 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3768 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3769 if (rcStrict != VINF_SUCCESS)
3770 {
3771 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3772 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3773 return rcStrict;
3774 }
3775
3776         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3777 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3778 pCurTSS16->ip = uNextEip;
3779 pCurTSS16->flags = u32EFlags;
3780 pCurTSS16->ax = pCtx->ax;
3781 pCurTSS16->cx = pCtx->cx;
3782 pCurTSS16->dx = pCtx->dx;
3783 pCurTSS16->bx = pCtx->bx;
3784 pCurTSS16->sp = pCtx->sp;
3785 pCurTSS16->bp = pCtx->bp;
3786 pCurTSS16->si = pCtx->si;
3787 pCurTSS16->di = pCtx->di;
3788 pCurTSS16->es = pCtx->es.Sel;
3789 pCurTSS16->cs = pCtx->cs.Sel;
3790 pCurTSS16->ss = pCtx->ss.Sel;
3791 pCurTSS16->ds = pCtx->ds.Sel;
3792
3793 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3794 if (rcStrict != VINF_SUCCESS)
3795 {
3796 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3797 VBOXSTRICTRC_VAL(rcStrict)));
3798 return rcStrict;
3799 }
3800 }
3801
3802 /*
3803 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3804 */
3805 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3806 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3807 {
3808 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3809 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3810 pNewTSS->selPrev = pCtx->tr.Sel;
3811 }
3812
3813 /*
3814 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3815 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3816 */
3817 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3818 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3819 bool fNewDebugTrap;
3820 if (fIsNewTSS386)
3821 {
3822 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3823 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3824 uNewEip = pNewTSS32->eip;
3825 uNewEflags = pNewTSS32->eflags;
3826 uNewEax = pNewTSS32->eax;
3827 uNewEcx = pNewTSS32->ecx;
3828 uNewEdx = pNewTSS32->edx;
3829 uNewEbx = pNewTSS32->ebx;
3830 uNewEsp = pNewTSS32->esp;
3831 uNewEbp = pNewTSS32->ebp;
3832 uNewEsi = pNewTSS32->esi;
3833 uNewEdi = pNewTSS32->edi;
3834 uNewES = pNewTSS32->es;
3835 uNewCS = pNewTSS32->cs;
3836 uNewSS = pNewTSS32->ss;
3837 uNewDS = pNewTSS32->ds;
3838 uNewFS = pNewTSS32->fs;
3839 uNewGS = pNewTSS32->gs;
3840 uNewLdt = pNewTSS32->selLdt;
3841 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3842 }
3843 else
3844 {
3845 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3846 uNewCr3 = 0;
3847 uNewEip = pNewTSS16->ip;
3848 uNewEflags = pNewTSS16->flags;
3849 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3850 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3851 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3852 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3853 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3854 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3855 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3856 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3857 uNewES = pNewTSS16->es;
3858 uNewCS = pNewTSS16->cs;
3859 uNewSS = pNewTSS16->ss;
3860 uNewDS = pNewTSS16->ds;
3861 uNewFS = 0;
3862 uNewGS = 0;
3863 uNewLdt = pNewTSS16->selLdt;
3864 fNewDebugTrap = false;
3865 }
3866
3867 if (GCPtrNewTSS == GCPtrCurTSS)
3868 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3869 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3870
3871 /*
3872 * We're done accessing the new TSS.
3873 */
3874 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3875 if (rcStrict != VINF_SUCCESS)
3876 {
3877 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3878 return rcStrict;
3879 }
3880
3881 /*
3882 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3883 */
3884 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3885 {
3886 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3887 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3888 if (rcStrict != VINF_SUCCESS)
3889 {
3890 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3891 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3892 return rcStrict;
3893 }
3894
3895 /* Check that the descriptor indicates the new TSS is available (not busy). */
3896 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3897 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3898 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3899
3900 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3901 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3902 if (rcStrict != VINF_SUCCESS)
3903 {
3904 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3905 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3906 return rcStrict;
3907 }
3908 }
3909
3910 /*
3911      * From this point on, we're technically in the new task.  Any exception raised from here on
3912      * is delivered after the task switch has completed, but before the first instruction of the new task executes.
3913 */
3914 pCtx->tr.Sel = SelTSS;
3915 pCtx->tr.ValidSel = SelTSS;
3916 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3917 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3918 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3919 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3920 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3921
3922 /* Set the busy bit in TR. */
3923 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3924 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3925 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3926 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3927 {
3928 uNewEflags |= X86_EFL_NT;
3929 }
3930
3931 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3932 pCtx->cr0 |= X86_CR0_TS;
3933 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3934
3935 pCtx->eip = uNewEip;
3936 pCtx->eax = uNewEax;
3937 pCtx->ecx = uNewEcx;
3938 pCtx->edx = uNewEdx;
3939 pCtx->ebx = uNewEbx;
3940 pCtx->esp = uNewEsp;
3941 pCtx->ebp = uNewEbp;
3942 pCtx->esi = uNewEsi;
3943 pCtx->edi = uNewEdi;
3944
3945 uNewEflags &= X86_EFL_LIVE_MASK;
3946 uNewEflags |= X86_EFL_RA1_MASK;
3947 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3948
3949 /*
3950 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3951 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3952 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3953 */
3954 pCtx->es.Sel = uNewES;
3955 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3956
3957 pCtx->cs.Sel = uNewCS;
3958 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3959
3960 pCtx->ss.Sel = uNewSS;
3961 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3962
3963 pCtx->ds.Sel = uNewDS;
3964 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3965
3966 pCtx->fs.Sel = uNewFS;
3967 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3968
3969 pCtx->gs.Sel = uNewGS;
3970 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3971 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3972
3973 pCtx->ldtr.Sel = uNewLdt;
3974 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3975 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3976 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3977
3978 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3979 {
3980 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3981 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3982 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3983 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3984 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3985 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3986 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3987 }
3988
3989 /*
3990 * Switch CR3 for the new task.
3991 */
3992 if ( fIsNewTSS386
3993 && (pCtx->cr0 & X86_CR0_PG))
3994 {
3995 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3996 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3997 {
3998 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3999 AssertRCSuccessReturn(rc, rc);
4000 }
4001 else
4002 pCtx->cr3 = uNewCr3;
4003
4004 /* Inform PGM. */
4005 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4006 {
4007 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4008 AssertRCReturn(rc, rc);
4009 /* ignore informational status codes */
4010 }
4011 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4012 }
4013
4014 /*
4015 * Switch LDTR for the new task.
4016 */
4017 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4018 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4019 else
4020 {
4021 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4022
4023 IEMSELDESC DescNewLdt;
4024 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4025 if (rcStrict != VINF_SUCCESS)
4026 {
4027 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4028 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4029 return rcStrict;
4030 }
4031 if ( !DescNewLdt.Legacy.Gen.u1Present
4032 || DescNewLdt.Legacy.Gen.u1DescType
4033 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4034 {
4035 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4036 uNewLdt, DescNewLdt.Legacy.u));
4037 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4038 }
4039
4040 pCtx->ldtr.ValidSel = uNewLdt;
4041 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4042 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4043 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4044 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4045 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4046 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4048 }
4049
4050 IEMSELDESC DescSS;
4051 if (IEM_IS_V86_MODE(pVCpu))
4052 {
4053 pVCpu->iem.s.uCpl = 3;
4054 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4055 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4056 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4057 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4058 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4059 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4060
4061 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4062 DescSS.Legacy.u = 0;
4063 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4064 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4065 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4066 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4067 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4068 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4069 DescSS.Legacy.Gen.u2Dpl = 3;
4070 }
4071 else
4072 {
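            /* Outside V8086 mode the CPL of the incoming task comes from the RPL of the CS selector
               stored in the new TSS. */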
4073 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4074
4075 /*
4076 * Load the stack segment for the new task.
4077 */
4078 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4079 {
4080 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4081 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4082 }
4083
4084 /* Fetch the descriptor. */
4085 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4086 if (rcStrict != VINF_SUCCESS)
4087 {
4088 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4089 VBOXSTRICTRC_VAL(rcStrict)));
4090 return rcStrict;
4091 }
4092
4093 /* SS must be a data segment and writable. */
4094 if ( !DescSS.Legacy.Gen.u1DescType
4095 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4096 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4097 {
4098 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4099 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4100 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4101 }
4102
4103 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4104 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4105 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4106 {
4107 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4108 uNewCpl));
4109 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4110 }
4111
4112 /* Is it there? */
4113 if (!DescSS.Legacy.Gen.u1Present)
4114 {
4115 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4116 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4117 }
4118
4119 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4120 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4121
4122 /* Set the accessed bit before committing the result into SS. */
4123 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4124 {
4125 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4126 if (rcStrict != VINF_SUCCESS)
4127 return rcStrict;
4128 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4129 }
4130
4131 /* Commit SS. */
4132 pCtx->ss.Sel = uNewSS;
4133 pCtx->ss.ValidSel = uNewSS;
4134 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4135 pCtx->ss.u32Limit = cbLimit;
4136 pCtx->ss.u64Base = u64Base;
4137 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4139
4140 /* CPL has changed, update IEM before loading rest of segments. */
4141 pVCpu->iem.s.uCpl = uNewCpl;
4142
4143 /*
4144 * Load the data segments for the new task.
4145 */
4146 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4147 if (rcStrict != VINF_SUCCESS)
4148 return rcStrict;
4149 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4150 if (rcStrict != VINF_SUCCESS)
4151 return rcStrict;
4152 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4153 if (rcStrict != VINF_SUCCESS)
4154 return rcStrict;
4155 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4156 if (rcStrict != VINF_SUCCESS)
4157 return rcStrict;
4158
4159 /*
4160 * Load the code segment for the new task.
4161 */
4162 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4163 {
4164 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4165 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4166 }
4167
4168 /* Fetch the descriptor. */
4169 IEMSELDESC DescCS;
4170 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4171 if (rcStrict != VINF_SUCCESS)
4172 {
4173 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4174 return rcStrict;
4175 }
4176
4177 /* CS must be a code segment. */
4178 if ( !DescCS.Legacy.Gen.u1DescType
4179 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4180 {
4181 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4182 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4183 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4184 }
4185
4186 /* For conforming CS, DPL must be less than or equal to the RPL. */
4187 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4188 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4189 {
4190             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4191 DescCS.Legacy.Gen.u2Dpl));
4192 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4193 }
4194
4195 /* For non-conforming CS, DPL must match RPL. */
4196 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4197 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4198 {
4199             Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4200 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4201 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4202 }
4203
4204 /* Is it there? */
4205 if (!DescCS.Legacy.Gen.u1Present)
4206 {
4207 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4208 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4209 }
4210
4211 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4212 u64Base = X86DESC_BASE(&DescCS.Legacy);
4213
4214 /* Set the accessed bit before committing the result into CS. */
4215 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4216 {
4217 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4218 if (rcStrict != VINF_SUCCESS)
4219 return rcStrict;
4220 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4221 }
4222
4223 /* Commit CS. */
4224 pCtx->cs.Sel = uNewCS;
4225 pCtx->cs.ValidSel = uNewCS;
4226 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4227 pCtx->cs.u32Limit = cbLimit;
4228 pCtx->cs.u64Base = u64Base;
4229 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4230 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4231 }
4232
4233 /** @todo Debug trap. */
4234 if (fIsNewTSS386 && fNewDebugTrap)
4235 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4236
4237 /*
4238 * Construct the error code masks based on what caused this task switch.
4239 * See Intel Instruction reference for INT.
4240 */
4241 uint16_t uExt;
4242 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4243 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4244 {
4245 uExt = 1;
4246 }
4247 else
4248 uExt = 0;
4249
4250 /*
4251 * Push any error code on to the new stack.
4252 */
4253 if (fFlags & IEM_XCPT_FLAGS_ERR)
4254 {
4255 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4256 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4257 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4258
4259 /* Check that there is sufficient space on the stack. */
4260 /** @todo Factor out segment limit checking for normal/expand down segments
4261 * into a separate function. */
4262 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4263 {
4264 if ( pCtx->esp - 1 > cbLimitSS
4265 || pCtx->esp < cbStackFrame)
4266 {
4267 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4268 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4269 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4270 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4271 }
4272 }
4273 else
4274 {
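                /* Expand-down segment: valid offsets lie above the limit, up to 64K or 4G depending
                   on the D/B bit, hence the inverted comparisons. */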
4275 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4276 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4277 {
4278 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4279 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4280 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4281 }
4282 }
4283
4284
4285 if (fIsNewTSS386)
4286 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4287 else
4288 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4289 if (rcStrict != VINF_SUCCESS)
4290 {
4291 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4292 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4293 return rcStrict;
4294 }
4295 }
4296
4297 /* Check the new EIP against the new CS limit. */
4298 if (pCtx->eip > pCtx->cs.u32Limit)
4299 {
4300         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4301 pCtx->eip, pCtx->cs.u32Limit));
4302 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4303 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4304 }
4305
4306 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4307 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4308}
4309
4310
4311/**
4312 * Implements exceptions and interrupts for protected mode.
4313 *
4314 * @returns VBox strict status code.
4315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4316 * @param pCtx The CPU context.
4317 * @param cbInstr The number of bytes to offset rIP by in the return
4318 * address.
4319 * @param u8Vector The interrupt / exception vector number.
4320 * @param fFlags The flags.
4321 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4322 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4323 */
4324IEM_STATIC VBOXSTRICTRC
4325iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4326 PCPUMCTX pCtx,
4327 uint8_t cbInstr,
4328 uint8_t u8Vector,
4329 uint32_t fFlags,
4330 uint16_t uErr,
4331 uint64_t uCr2)
4332{
4333 /*
4334 * Read the IDT entry.
4335 */
4336 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4337 {
4338 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4339 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4340 }
4341 X86DESC Idte;
4342 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4343 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4344 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4345 return rcStrict;
4346 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4347 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4348 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4349
4350 /*
4351 * Check the descriptor type, DPL and such.
4352 * ASSUMES this is done in the same order as described for call-gate calls.
4353 */
4354 if (Idte.Gate.u1DescType)
4355 {
4356 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4357 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4358 }
4359 bool fTaskGate = false;
4360 uint8_t f32BitGate = true;
4361 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4362 switch (Idte.Gate.u4Type)
4363 {
4364 case X86_SEL_TYPE_SYS_UNDEFINED:
4365 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4366 case X86_SEL_TYPE_SYS_LDT:
4367 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4368 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4369 case X86_SEL_TYPE_SYS_UNDEFINED2:
4370 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4371 case X86_SEL_TYPE_SYS_UNDEFINED3:
4372 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4373 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4374 case X86_SEL_TYPE_SYS_UNDEFINED4:
4375 {
4376 /** @todo check what actually happens when the type is wrong...
4377 * esp. call gates. */
4378 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4379 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4380 }
4381
4382 case X86_SEL_TYPE_SYS_286_INT_GATE:
4383 f32BitGate = false;
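                /* fall thru - shares the code of the 386 gate, only the gate width differs */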
4384 case X86_SEL_TYPE_SYS_386_INT_GATE:
4385 fEflToClear |= X86_EFL_IF;
4386 break;
4387
4388 case X86_SEL_TYPE_SYS_TASK_GATE:
4389 fTaskGate = true;
4390#ifndef IEM_IMPLEMENTS_TASKSWITCH
4391 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4392#endif
4393 break;
4394
4395 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4396 f32BitGate = false;
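                /* fall thru */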
4397 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4398 break;
4399
4400 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4401 }
4402
4403 /* Check DPL against CPL if applicable. */
4404 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4405 {
4406 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4407 {
4408 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4409 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4410 }
4411 }
4412
4413 /* Is it there? */
4414 if (!Idte.Gate.u1Present)
4415 {
4416 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4417 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4418 }
4419
4420 /* Is it a task-gate? */
4421 if (fTaskGate)
4422 {
4423 /*
4424 * Construct the error code masks based on what caused this task switch.
4425 * See Intel Instruction reference for INT.
4426 */
4427 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4428 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4429 RTSEL SelTSS = Idte.Gate.u16Sel;
4430
4431 /*
4432 * Fetch the TSS descriptor in the GDT.
4433 */
4434 IEMSELDESC DescTSS;
4435 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4436 if (rcStrict != VINF_SUCCESS)
4437 {
4438 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4439 VBOXSTRICTRC_VAL(rcStrict)));
4440 return rcStrict;
4441 }
4442
4443 /* The TSS descriptor must be a system segment and be available (not busy). */
4444 if ( DescTSS.Legacy.Gen.u1DescType
4445 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4446 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4447 {
4448 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4449 u8Vector, SelTSS, DescTSS.Legacy.au64));
4450 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4451 }
4452
4453 /* The TSS must be present. */
4454 if (!DescTSS.Legacy.Gen.u1Present)
4455 {
4456 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4457 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4458 }
4459
4460 /* Do the actual task switch. */
4461 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4462 }
4463
4464 /* A null CS is bad. */
4465 RTSEL NewCS = Idte.Gate.u16Sel;
4466 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4467 {
4468 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4469 return iemRaiseGeneralProtectionFault0(pVCpu);
4470 }
4471
4472 /* Fetch the descriptor for the new CS. */
4473 IEMSELDESC DescCS;
4474 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4475 if (rcStrict != VINF_SUCCESS)
4476 {
4477 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4478 return rcStrict;
4479 }
4480
4481 /* Must be a code segment. */
4482 if (!DescCS.Legacy.Gen.u1DescType)
4483 {
4484 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4485 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4486 }
4487 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4488 {
4489 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4490 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4491 }
4492
4493 /* Don't allow lowering the privilege level. */
4494 /** @todo Does the lowering of privileges apply to software interrupts
4495 * only? This has bearings on the more-privileged or
4496 * same-privilege stack behavior further down. A testcase would
4497 * be nice. */
4498 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4499 {
4500 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4501 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4502 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4503 }
4504
4505 /* Make sure the selector is present. */
4506 if (!DescCS.Legacy.Gen.u1Present)
4507 {
4508 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4509 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4510 }
4511
4512 /* Check the new EIP against the new CS limit. */
4513 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4514 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4515 ? Idte.Gate.u16OffsetLow
4516 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4517 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4518 if (uNewEip > cbLimitCS)
4519 {
4520 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4521 u8Vector, uNewEip, cbLimitCS, NewCS));
4522 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4523 }
4524 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4525
4526 /* Calc the flag image to push. */
4527 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4528 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4529 fEfl &= ~X86_EFL_RF;
4530 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4531 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4532
4533     /* The new CPL: a conforming CS keeps the current CPL, otherwise the CS DPL becomes the CPL.  From V8086 mode only go to CPL 0. */
4534 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4535 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4536 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4537 {
4538 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4539 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4540 }
4541
4542 /*
4543 * If the privilege level changes, we need to get a new stack from the TSS.
4544 * This in turns means validating the new SS and ESP...
4545 */
4546 if (uNewCpl != pVCpu->iem.s.uCpl)
4547 {
4548 RTSEL NewSS;
4549 uint32_t uNewEsp;
4550 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4551 if (rcStrict != VINF_SUCCESS)
4552 return rcStrict;
4553
4554 IEMSELDESC DescSS;
4555 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4556 if (rcStrict != VINF_SUCCESS)
4557 return rcStrict;
4558 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4559
4560 /* Check that there is sufficient space for the stack frame. */
4561 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
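            /* Frame entries are 2 bytes each, doubled by the 32-bit gate shift: [err,] EIP, CS,
               EFLAGS, ESP, SS - plus ES, DS, FS and GS when interrupting V8086 code. */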
4562 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4563 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4564 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4565
4566 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4567 {
4568 if ( uNewEsp - 1 > cbLimitSS
4569 || uNewEsp < cbStackFrame)
4570 {
4571 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4572 u8Vector, NewSS, uNewEsp, cbStackFrame));
4573 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4574 }
4575 }
4576 else
4577 {
4578                 if (   uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4579 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4580 {
4581 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4582 u8Vector, NewSS, uNewEsp, cbStackFrame));
4583 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4584 }
4585 }
4586
4587 /*
4588 * Start making changes.
4589 */
4590
4591 /* Set the new CPL so that stack accesses use it. */
4592 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4593 pVCpu->iem.s.uCpl = uNewCpl;
4594
4595 /* Create the stack frame. */
4596 RTPTRUNION uStackFrame;
4597 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4598 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4599 if (rcStrict != VINF_SUCCESS)
4600 return rcStrict;
4601 void * const pvStackFrame = uStackFrame.pv;
4602 if (f32BitGate)
4603 {
4604 if (fFlags & IEM_XCPT_FLAGS_ERR)
4605 *uStackFrame.pu32++ = uErr;
4606 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4607 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4608 uStackFrame.pu32[2] = fEfl;
4609 uStackFrame.pu32[3] = pCtx->esp;
4610 uStackFrame.pu32[4] = pCtx->ss.Sel;
4611 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4612 if (fEfl & X86_EFL_VM)
4613 {
4614 uStackFrame.pu32[1] = pCtx->cs.Sel;
4615 uStackFrame.pu32[5] = pCtx->es.Sel;
4616 uStackFrame.pu32[6] = pCtx->ds.Sel;
4617 uStackFrame.pu32[7] = pCtx->fs.Sel;
4618 uStackFrame.pu32[8] = pCtx->gs.Sel;
4619 }
4620 }
4621 else
4622 {
4623 if (fFlags & IEM_XCPT_FLAGS_ERR)
4624 *uStackFrame.pu16++ = uErr;
4625 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4626 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4627 uStackFrame.pu16[2] = fEfl;
4628 uStackFrame.pu16[3] = pCtx->sp;
4629 uStackFrame.pu16[4] = pCtx->ss.Sel;
4630 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4631 if (fEfl & X86_EFL_VM)
4632 {
4633 uStackFrame.pu16[1] = pCtx->cs.Sel;
4634 uStackFrame.pu16[5] = pCtx->es.Sel;
4635 uStackFrame.pu16[6] = pCtx->ds.Sel;
4636 uStackFrame.pu16[7] = pCtx->fs.Sel;
4637 uStackFrame.pu16[8] = pCtx->gs.Sel;
4638 }
4639 }
4640 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4641 if (rcStrict != VINF_SUCCESS)
4642 return rcStrict;
4643
4644 /* Mark the selectors 'accessed' (hope this is the correct time). */
4645 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4646 * after pushing the stack frame? (Write protect the gdt + stack to
4647 * find out.) */
4648 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4649 {
4650 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4651 if (rcStrict != VINF_SUCCESS)
4652 return rcStrict;
4653 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4654 }
4655
4656 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4657 {
4658 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4659 if (rcStrict != VINF_SUCCESS)
4660 return rcStrict;
4661 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4662 }
4663
4664 /*
4665 * Start committing the register changes (joins with the DPL=CPL branch).
4666 */
4667 pCtx->ss.Sel = NewSS;
4668 pCtx->ss.ValidSel = NewSS;
4669 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4670 pCtx->ss.u32Limit = cbLimitSS;
4671 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4672 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4673 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4674 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4675 * SP is loaded).
4676 * Need to check the other combinations too:
4677 * - 16-bit TSS, 32-bit handler
4678 * - 32-bit TSS, 16-bit handler */
4679 if (!pCtx->ss.Attr.n.u1DefBig)
4680 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4681 else
4682 pCtx->rsp = uNewEsp - cbStackFrame;
4683
4684 if (fEfl & X86_EFL_VM)
4685 {
4686 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4687 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4688 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4689 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4690 }
4691 }
4692 /*
4693 * Same privilege, no stack change and smaller stack frame.
4694 */
4695 else
4696 {
4697 uint64_t uNewRsp;
4698 RTPTRUNION uStackFrame;
4699 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4700 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4701 if (rcStrict != VINF_SUCCESS)
4702 return rcStrict;
4703 void * const pvStackFrame = uStackFrame.pv;
4704
4705 if (f32BitGate)
4706 {
4707 if (fFlags & IEM_XCPT_FLAGS_ERR)
4708 *uStackFrame.pu32++ = uErr;
4709 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4710 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4711 uStackFrame.pu32[2] = fEfl;
4712 }
4713 else
4714 {
4715 if (fFlags & IEM_XCPT_FLAGS_ERR)
4716 *uStackFrame.pu16++ = uErr;
4717 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4718 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4719 uStackFrame.pu16[2] = fEfl;
4720 }
4721 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4722 if (rcStrict != VINF_SUCCESS)
4723 return rcStrict;
4724
4725 /* Mark the CS selector as 'accessed'. */
4726 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4727 {
4728 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4729 if (rcStrict != VINF_SUCCESS)
4730 return rcStrict;
4731 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4732 }
4733
4734 /*
4735 * Start committing the register changes (joins with the other branch).
4736 */
4737 pCtx->rsp = uNewRsp;
4738 }
4739
4740 /* ... register committing continues. */
4741 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4742 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4743 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4744 pCtx->cs.u32Limit = cbLimitCS;
4745 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4746 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4747
4748 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4749 fEfl &= ~fEflToClear;
4750 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4751
4752 if (fFlags & IEM_XCPT_FLAGS_CR2)
4753 pCtx->cr2 = uCr2;
4754
4755 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4756 iemRaiseXcptAdjustState(pCtx, u8Vector);
4757
4758 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4759}
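
/* Informal sketch of the ring-transition frame built above, lowest guest
 * address first (32-bit gate; the 16-bit gate pushes the same fields as
 * words):  [error code,] EIP, CS, EFLAGS, old ESP, old SS, and when
 * interrupting V86 code additionally ES, DS, FS and GS.  The same-privilege
 * branch only pushes [error code,] EIP, CS and EFLAGS. */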
4760
4761
4762/**
4763 * Implements exceptions and interrupts for long mode.
4764 *
4765 * @returns VBox strict status code.
4766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4767 * @param pCtx The CPU context.
4768 * @param cbInstr The number of bytes to offset rIP by in the return
4769 * address.
4770 * @param u8Vector The interrupt / exception vector number.
4771 * @param fFlags The flags.
4772 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4773 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4774 */
4775IEM_STATIC VBOXSTRICTRC
4776iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4777 PCPUMCTX pCtx,
4778 uint8_t cbInstr,
4779 uint8_t u8Vector,
4780 uint32_t fFlags,
4781 uint16_t uErr,
4782 uint64_t uCr2)
4783{
4784 /*
4785 * Read the IDT entry.
4786 */
4787 uint16_t offIdt = (uint16_t)u8Vector << 4;
4788 if (pCtx->idtr.cbIdt < offIdt + 7)
4789 {
4790 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4791 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4792 }
4793 X86DESC64 Idte;
4794 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4795 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4796 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4797 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4798 return rcStrict;
4799 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4800 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4801 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4802
4803 /*
4804 * Check the descriptor type, DPL and such.
4805 * ASSUMES this is done in the same order as described for call-gate calls.
4806 */
4807 if (Idte.Gate.u1DescType)
4808 {
4809 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4810 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4811 }
4812 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4813 switch (Idte.Gate.u4Type)
4814 {
4815 case AMD64_SEL_TYPE_SYS_INT_GATE:
4816 fEflToClear |= X86_EFL_IF;
4817 break;
4818 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4819 break;
4820
4821 default:
4822 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4823 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4824 }
4825
4826 /* Check DPL against CPL if applicable. */
4827 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4828 {
4829 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4830 {
4831 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4832 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4833 }
4834 }
4835
4836 /* Is it there? */
4837 if (!Idte.Gate.u1Present)
4838 {
4839 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4840 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4841 }
4842
4843 /* A null CS is bad. */
4844 RTSEL NewCS = Idte.Gate.u16Sel;
4845 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4846 {
4847 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4848 return iemRaiseGeneralProtectionFault0(pVCpu);
4849 }
4850
4851 /* Fetch the descriptor for the new CS. */
4852 IEMSELDESC DescCS;
4853 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4854 if (rcStrict != VINF_SUCCESS)
4855 {
4856 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4857 return rcStrict;
4858 }
4859
4860 /* Must be a 64-bit code segment. */
4861 if (!DescCS.Long.Gen.u1DescType)
4862 {
4863 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4864 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4865 }
4866 if ( !DescCS.Long.Gen.u1Long
4867 || DescCS.Long.Gen.u1DefBig
4868 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4869 {
4870 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4871 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4872 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4873 }
4874
4875 /* Don't allow lowering the privilege level. For non-conforming CS
4876 selectors, the CS.DPL sets the privilege level the trap/interrupt
4877 handler runs at. For conforming CS selectors, the CPL remains
4878 unchanged, but the CS.DPL must be <= CPL. */
4879 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4880 * when CPU in Ring-0. Result \#GP? */
4881 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4882 {
4883 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4884 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4885 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4886 }
4887
4888
4889 /* Make sure the selector is present. */
4890 if (!DescCS.Legacy.Gen.u1Present)
4891 {
4892 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4893 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4894 }
4895
4896 /* Check that the new RIP is canonical. */
4897 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4898 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4899 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4900 if (!IEM_IS_CANONICAL(uNewRip))
4901 {
4902 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4903 return iemRaiseGeneralProtectionFault0(pVCpu);
4904 }
4905
4906 /*
4907 * If the privilege level changes or if the IST isn't zero, we need to get
4908 * a new stack from the TSS.
4909 */
4910 uint64_t uNewRsp;
4911 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4912 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4913 if ( uNewCpl != pVCpu->iem.s.uCpl
4914 || Idte.Gate.u3IST != 0)
4915 {
4916 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4917 if (rcStrict != VINF_SUCCESS)
4918 return rcStrict;
4919 }
4920 else
4921 uNewRsp = pCtx->rsp;
4922 uNewRsp &= ~(uint64_t)0xf;
4923
4924 /*
4925 * Calc the flag image to push.
4926 */
4927 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4928 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4929 fEfl &= ~X86_EFL_RF;
4930 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4931 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4932
4933 /*
4934 * Start making changes.
4935 */
4936 /* Set the new CPL so that stack accesses use it. */
4937 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4938 pVCpu->iem.s.uCpl = uNewCpl;
4939
4940 /* Create the stack frame. */
4941 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4942 RTPTRUNION uStackFrame;
4943 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4944 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4945 if (rcStrict != VINF_SUCCESS)
4946 return rcStrict;
4947 void * const pvStackFrame = uStackFrame.pv;
4948
4949 if (fFlags & IEM_XCPT_FLAGS_ERR)
4950 *uStackFrame.pu64++ = uErr;
4951 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4952 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4953 uStackFrame.pu64[2] = fEfl;
4954 uStackFrame.pu64[3] = pCtx->rsp;
4955 uStackFrame.pu64[4] = pCtx->ss.Sel;
4956 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4957 if (rcStrict != VINF_SUCCESS)
4958 return rcStrict;
4959
4960 /* Mark the CS selector 'accessed' (hope this is the correct time). */
4961 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4962 * after pushing the stack frame? (Write protect the gdt + stack to
4963 * find out.) */
4964 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4965 {
4966 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4967 if (rcStrict != VINF_SUCCESS)
4968 return rcStrict;
4969 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4970 }
4971
4972 /*
4973 * Start committing the register changes.
4974 */
4975 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4976 * hidden registers when interrupting 32-bit or 16-bit code! */
4977 if (uNewCpl != uOldCpl)
4978 {
4979 pCtx->ss.Sel = 0 | uNewCpl;
4980 pCtx->ss.ValidSel = 0 | uNewCpl;
4981 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4982 pCtx->ss.u32Limit = UINT32_MAX;
4983 pCtx->ss.u64Base = 0;
4984 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4985 }
4986 pCtx->rsp = uNewRsp - cbStackFrame;
4987 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4988 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4989 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4990 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4991 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4992 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4993 pCtx->rip = uNewRip;
4994
4995 fEfl &= ~fEflToClear;
4996 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4997
4998 if (fFlags & IEM_XCPT_FLAGS_CR2)
4999 pCtx->cr2 = uCr2;
5000
5001 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5002 iemRaiseXcptAdjustState(pCtx, u8Vector);
5003
5004 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5005}
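
/* Informal sketch of the 64-bit frame built above, lowest guest address
 * first: [error code,] RIP, CS (with the previous CPL in the RPL bits),
 * RFLAGS, old RSP, old SS.  The frame is written at the 16-byte aligned
 * uNewRsp minus cbStackFrame. */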
5006
5007
5008/**
5009 * Implements exceptions and interrupts.
5010 *
5011 * All exceptions and interrupts go through this function!
5012 *
5013 * @returns VBox strict status code.
5014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5015 * @param cbInstr The number of bytes to offset rIP by in the return
5016 * address.
5017 * @param u8Vector The interrupt / exception vector number.
5018 * @param fFlags The flags.
5019 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5020 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5021 */
5022DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5023iemRaiseXcptOrInt(PVMCPU pVCpu,
5024 uint8_t cbInstr,
5025 uint8_t u8Vector,
5026 uint32_t fFlags,
5027 uint16_t uErr,
5028 uint64_t uCr2)
5029{
5030 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5031#ifdef IN_RING0
5032 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5033 AssertRCReturn(rc, rc);
5034#endif
5035
5036#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5037 /*
5038 * Flush prefetch buffer
5039 */
5040 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5041#endif
5042
5043 /*
5044 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5045 */
5046 if ( pCtx->eflags.Bits.u1VM
5047 && pCtx->eflags.Bits.u2IOPL != 3
5048 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5049 && (pCtx->cr0 & X86_CR0_PE) )
5050 {
5051 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5052 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5053 u8Vector = X86_XCPT_GP;
5054 uErr = 0;
5055 }
5056#ifdef DBGFTRACE_ENABLED
5057 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5058 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5059 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5060#endif
5061
5062 /*
5063 * Do recursion accounting.
5064 */
5065 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5066 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5067 if (pVCpu->iem.s.cXcptRecursions == 0)
5068 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5069 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5070 else
5071 {
5072 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5073 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5074
5075 /** @todo double and triple faults. */
5076 if (pVCpu->iem.s.cXcptRecursions >= 3)
5077 {
5078#ifdef DEBUG_bird
5079 AssertFailed();
5080#endif
5081 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5082 }
5083
5084 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5085 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5086 {
5087 ....
5088 } */
5089 }
5090 pVCpu->iem.s.cXcptRecursions++;
5091 pVCpu->iem.s.uCurXcpt = u8Vector;
5092 pVCpu->iem.s.fCurXcpt = fFlags;
5093
5094 /*
5095 * Extensive logging.
5096 */
5097#if defined(LOG_ENABLED) && defined(IN_RING3)
5098 if (LogIs3Enabled())
5099 {
5100 PVM pVM = pVCpu->CTX_SUFF(pVM);
5101 char szRegs[4096];
5102 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5103 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5104 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5105 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5106 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5107 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5108 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5109 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5110 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5111 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5112 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5113 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5114 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5115 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5116 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5117 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5118 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5119 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5120 " efer=%016VR{efer}\n"
5121 " pat=%016VR{pat}\n"
5122 " sf_mask=%016VR{sf_mask}\n"
5123 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5124 " lstar=%016VR{lstar}\n"
5125 " star=%016VR{star} cstar=%016VR{cstar}\n"
5126 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5127 );
5128
5129 char szInstr[256];
5130 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5131 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5132 szInstr, sizeof(szInstr), NULL);
5133 Log3(("%s%s\n", szRegs, szInstr));
5134 }
5135#endif /* LOG_ENABLED */
5136
5137 /*
5138 * Call the mode specific worker function.
5139 */
5140 VBOXSTRICTRC rcStrict;
5141 if (!(pCtx->cr0 & X86_CR0_PE))
5142 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5143 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5144 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5145 else
5146 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5147
5148 /* Flush the prefetch buffer. */
5149#ifdef IEM_WITH_CODE_TLB
5150 pVCpu->iem.s.pbInstrBuf = NULL;
5151#else
5152 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5153#endif
5154
5155 /*
5156 * Unwind.
5157 */
5158 pVCpu->iem.s.cXcptRecursions--;
5159 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5160 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5161 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5162 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5163 return rcStrict;
5164}
5165
5166#ifdef IEM_WITH_SETJMP
5167/**
5168 * See iemRaiseXcptOrInt. Will not return.
5169 */
5170IEM_STATIC DECL_NO_RETURN(void)
5171iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5172 uint8_t cbInstr,
5173 uint8_t u8Vector,
5174 uint32_t fFlags,
5175 uint16_t uErr,
5176 uint64_t uCr2)
5177{
5178 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5179 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5180}
5181#endif
5182
5183
5184/** \#DE - 00. */
5185DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5186{
5187 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5188}
5189
5190
5191/** \#DB - 01.
5192 * @note This automatically clears DR7.GD. */
5193DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5194{
5195 /** @todo set/clear RF. */
5196 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5197 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5198}
5199
5200
5201/** \#UD - 06. */
5202DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5203{
5204 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5205}
5206
5207
5208/** \#NM - 07. */
5209DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5210{
5211 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5212}
5213
5214
5215/** \#TS(err) - 0a. */
5216DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5217{
5218 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5219}
5220
5221
5222/** \#TS(tr) - 0a. */
5223DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5224{
5225 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5226 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5227}
5228
5229
5230/** \#TS(0) - 0a. */
5231DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5232{
5233 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5234 0, 0);
5235}
5236
5237
5238/** \#TS(err) - 0a. */
5239DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5240{
5241 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5242 uSel & X86_SEL_MASK_OFF_RPL, 0);
5243}
5244
5245
5246/** \#NP(err) - 0b. */
5247DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5248{
5249 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5250}
5251
5252
5253/** \#NP(seg) - 0b. */
5254DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5255{
5256 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5257 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5258}
5259
5260
5261/** \#NP(sel) - 0b. */
5262DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5263{
5264 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5265 uSel & ~X86_SEL_RPL, 0);
5266}
5267
5268
5269/** \#SS(seg) - 0c. */
5270DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5271{
5272 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5273 uSel & ~X86_SEL_RPL, 0);
5274}
5275
5276
5277/** \#SS(err) - 0c. */
5278DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5279{
5280 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5281}
5282
5283
5284/** \#GP(n) - 0d. */
5285DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5286{
5287 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5288}
5289
5290
5291/** \#GP(0) - 0d. */
5292DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5293{
5294 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5295}
5296
5297#ifdef IEM_WITH_SETJMP
5298/** \#GP(0) - 0d. */
5299DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5300{
5301 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5302}
5303#endif
5304
5305
5306/** \#GP(sel) - 0d. */
5307DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5308{
5309 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5310 Sel & ~X86_SEL_RPL, 0);
5311}
5312
5313
5314/** \#GP(0) - 0d. */
5315DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5316{
5317 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5318}
5319
5320
5321/** \#GP(sel) - 0d. */
5322DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5323{
5324 NOREF(iSegReg); NOREF(fAccess);
5325 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5326 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5327}
5328
5329#ifdef IEM_WITH_SETJMP
5330/** \#GP(sel) - 0d, longjmp. */
5331DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5332{
5333 NOREF(iSegReg); NOREF(fAccess);
5334 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5335 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5336}
5337#endif
5338
5339/** \#GP(sel) - 0d. */
5340DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5341{
5342 NOREF(Sel);
5343 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5344}
5345
5346#ifdef IEM_WITH_SETJMP
5347/** \#GP(sel) - 0d, longjmp. */
5348DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5349{
5350 NOREF(Sel);
5351 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5352}
5353#endif
5354
5355
5356/** \#GP(sel) - 0d. */
5357DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5358{
5359 NOREF(iSegReg); NOREF(fAccess);
5360 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5361}
5362
5363#ifdef IEM_WITH_SETJMP
5364/** \#GP(sel) - 0d, longjmp. */
5365DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5366 uint32_t fAccess)
5367{
5368 NOREF(iSegReg); NOREF(fAccess);
5369 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5370}
5371#endif
5372
5373
5374/** \#PF(n) - 0e. */
5375DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5376{
5377 uint16_t uErr;
5378 switch (rc)
5379 {
5380 case VERR_PAGE_NOT_PRESENT:
5381 case VERR_PAGE_TABLE_NOT_PRESENT:
5382 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5383 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5384 uErr = 0;
5385 break;
5386
5387 default:
5388 AssertMsgFailed(("%Rrc\n", rc));
5389 case VERR_ACCESS_DENIED:
5390 uErr = X86_TRAP_PF_P;
5391 break;
5392
5393 /** @todo reserved */
5394 }
5395
5396 if (pVCpu->iem.s.uCpl == 3)
5397 uErr |= X86_TRAP_PF_US;
5398
5399 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5400 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5401 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5402 uErr |= X86_TRAP_PF_ID;
5403
5404#if 0 /* This is so much non-sense, really. Why was it done like that? */
5405 /* Note! RW access callers reporting a WRITE protection fault, will clear
5406 the READ flag before calling. So, read-modify-write accesses (RW)
5407 can safely be reported as READ faults. */
5408 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5409 uErr |= X86_TRAP_PF_RW;
5410#else
5411 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5412 {
5413 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5414 uErr |= X86_TRAP_PF_RW;
5415 }
5416#endif
5417
5418 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5419 uErr, GCPtrWhere);
5420}
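
/* Informal summary of the \#PF error code bits assembled above:
 * X86_TRAP_PF_P - protection violation (page present), X86_TRAP_PF_RW -
 * the faulting access was a write, X86_TRAP_PF_US - the access came from
 * CPL 3, X86_TRAP_PF_ID - instruction fetch (only with PAE + EFER.NXE). */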
5421
5422#ifdef IEM_WITH_SETJMP
5423/** \#PF(n) - 0e, longjmp. */
5424IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5425{
5426 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5427}
5428#endif
5429
5430
5431/** \#MF(0) - 10. */
5432DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5433{
5434 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5435}
5436
5437
5438/** \#AC(0) - 11. */
5439DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5440{
5441 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5442}
5443
5444
5445/**
5446 * Macro for calling iemCImplRaiseDivideError().
5447 *
5448 * This enables us to add/remove arguments and force different levels of
5449 * inlining as we wish.
5450 *
5451 * @return Strict VBox status code.
5452 */
5453#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5454IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5455{
5456 NOREF(cbInstr);
5457 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5458}
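
/* Hypothetical usage sketch only - iemOp_Example and fDecodeProblem are
 * made-up names, the real opcode functions live elsewhere in IEM:
 *     FNIEMOP_DEF(iemOp_Example)
 *     {
 *         if (fDecodeProblem)
 *             return IEMOP_RAISE_DIVIDE_ERROR();
 *         ...
 *     }
 */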
5459
5460
5461/**
5462 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5463 *
5464 * This enables us to add/remove arguments and force different levels of
5465 * inlining as we wish.
5466 *
5467 * @return Strict VBox status code.
5468 */
5469#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5470IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5471{
5472 NOREF(cbInstr);
5473 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5474}
5475
5476
5477/**
5478 * Macro for calling iemCImplRaiseInvalidOpcode().
5479 *
5480 * This enables us to add/remove arguments and force different levels of
5481 * inlining as we wish.
5482 *
5483 * @return Strict VBox status code.
5484 */
5485#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5486IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5487{
5488 NOREF(cbInstr);
5489 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5490}
5491
5492
5493/** @} */
5494
5495
5496/*
5497 *
5498 * Helper routines.
5499 * Helper routines.
5500 * Helper routines.
5501 *
5502 */
5503
5504/**
5505 * Recalculates the effective operand size.
5506 *
5507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5508 */
5509IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5510{
5511 switch (pVCpu->iem.s.enmCpuMode)
5512 {
5513 case IEMMODE_16BIT:
5514 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5515 break;
5516 case IEMMODE_32BIT:
5517 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5518 break;
5519 case IEMMODE_64BIT:
5520 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5521 {
5522 case 0:
5523 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5524 break;
5525 case IEM_OP_PRF_SIZE_OP:
5526 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5527 break;
5528 case IEM_OP_PRF_SIZE_REX_W:
5529 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5530 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5531 break;
5532 }
5533 break;
5534 default:
5535 AssertFailed();
5536 }
5537}
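
/* Informal summary of the resolution above (66h = operand size prefix):
 * in 16-bit mode 66h flips to 32-bit and vice versa; in 64-bit mode REX.W
 * forces 64-bit (and beats 66h), 66h alone gives 16-bit, and otherwise the
 * default operand size (enmDefOpSize) is used. */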
5538
5539
5540/**
5541 * Sets the default operand size to 64-bit and recalculates the effective
5542 * operand size.
5543 *
5544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5545 */
5546IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5547{
5548 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5549 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5550 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5551 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5552 else
5553 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5554}
5555
5556
5557/*
5558 *
5559 * Common opcode decoders.
5560 * Common opcode decoders.
5561 * Common opcode decoders.
5562 *
5563 */
5564//#include <iprt/mem.h>
5565
5566/**
5567 * Used to add extra details about a stub case.
5568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5569 */
5570IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5571{
5572#if defined(LOG_ENABLED) && defined(IN_RING3)
5573 PVM pVM = pVCpu->CTX_SUFF(pVM);
5574 char szRegs[4096];
5575 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5576 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5577 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5578 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5579 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5580 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5581 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5582 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5583 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5584 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5585 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5586 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5587 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5588 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5589 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5590 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5591 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5592 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5593 " efer=%016VR{efer}\n"
5594 " pat=%016VR{pat}\n"
5595 " sf_mask=%016VR{sf_mask}\n"
5596 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5597 " lstar=%016VR{lstar}\n"
5598 " star=%016VR{star} cstar=%016VR{cstar}\n"
5599 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5600 );
5601
5602 char szInstr[256];
5603 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5604 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5605 szInstr, sizeof(szInstr), NULL);
5606
5607 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5608#else
5609 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5610#endif
5611}
5612
5613/**
5614 * Complains about a stub.
5615 *
5616 * Providing two versions of this macro, one for daily use and one for use when
5617 * working on IEM.
5618 */
5619#if 0
5620# define IEMOP_BITCH_ABOUT_STUB() \
5621 do { \
5622 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5623 iemOpStubMsg2(pVCpu); \
5624 RTAssertPanic(); \
5625 } while (0)
5626#else
5627# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5628#endif
5629
5630/** Stubs an opcode. */
5631#define FNIEMOP_STUB(a_Name) \
5632 FNIEMOP_DEF(a_Name) \
5633 { \
5634 RT_NOREF_PV(pVCpu); \
5635 IEMOP_BITCH_ABOUT_STUB(); \
5636 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5637 } \
5638 typedef int ignore_semicolon
5639
5640/** Stubs an opcode. */
5641#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5642 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5643 { \
5644 RT_NOREF_PV(pVCpu); \
5645 RT_NOREF_PV(a_Name0); \
5646 IEMOP_BITCH_ABOUT_STUB(); \
5647 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5648 } \
5649 typedef int ignore_semicolon
5650
5651/** Stubs an opcode which currently should raise \#UD. */
5652#define FNIEMOP_UD_STUB(a_Name) \
5653 FNIEMOP_DEF(a_Name) \
5654 { \
5655 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5656 return IEMOP_RAISE_INVALID_OPCODE(); \
5657 } \
5658 typedef int ignore_semicolon
5659
5660/** Stubs an opcode which currently should raise \#UD. */
5661#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5662 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5663 { \
5664 RT_NOREF_PV(pVCpu); \
5665 RT_NOREF_PV(a_Name0); \
5666 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5667 return IEMOP_RAISE_INVALID_OPCODE(); \
5668 } \
5669 typedef int ignore_semicolon
5670
5671
5672
5673/** @name Register Access.
5674 * @{
5675 */
5676
5677/**
5678 * Gets a reference (pointer) to the specified hidden segment register.
5679 *
5680 * @returns Hidden register reference.
5681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5682 * @param iSegReg The segment register.
5683 */
5684IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5685{
5686 Assert(iSegReg < X86_SREG_COUNT);
5687 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5688 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5689
5690#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5691 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5692 { /* likely */ }
5693 else
5694 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5695#else
5696 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5697#endif
5698 return pSReg;
5699}
5700
5701
5702/**
5703 * Ensures that the given hidden segment register is up to date.
5704 *
5705 * @returns Hidden register reference.
5706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5707 * @param pSReg The segment register.
5708 */
5709IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5710{
5711#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5712 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5713 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5714#else
5715 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5716 NOREF(pVCpu);
5717#endif
5718 return pSReg;
5719}
5720
5721
5722/**
5723 * Gets a reference (pointer) to the specified segment register (the selector
5724 * value).
5725 *
5726 * @returns Pointer to the selector variable.
5727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5728 * @param iSegReg The segment register.
5729 */
5730DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5731{
5732 Assert(iSegReg < X86_SREG_COUNT);
5733 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5734 return &pCtx->aSRegs[iSegReg].Sel;
5735}
5736
5737
5738/**
5739 * Fetches the selector value of a segment register.
5740 *
5741 * @returns The selector value.
5742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5743 * @param iSegReg The segment register.
5744 */
5745DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5746{
5747 Assert(iSegReg < X86_SREG_COUNT);
5748 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5749}
5750
5751
5752/**
5753 * Gets a reference (pointer) to the specified general purpose register.
5754 *
5755 * @returns Register reference.
5756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5757 * @param iReg The general purpose register.
5758 */
5759DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5760{
5761 Assert(iReg < 16);
5762 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5763 return &pCtx->aGRegs[iReg];
5764}
5765
5766
5767/**
5768 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5769 *
5770 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5771 *
5772 * @returns Register reference.
5773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5774 * @param iReg The register.
5775 */
5776DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5777{
5778 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5779 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5780 {
5781 Assert(iReg < 16);
5782 return &pCtx->aGRegs[iReg].u8;
5783 }
5784 /* high 8-bit register. */
5785 Assert(iReg < 8);
5786 return &pCtx->aGRegs[iReg & 3].bHi;
5787}
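
/* Example of the mapping above: without a REX prefix iReg 4..7 select the
 * legacy high bytes, e.g. iReg=4 (AH) -> aGRegs[0].bHi and iReg=7 (BH) ->
 * aGRegs[3].bHi; with any REX prefix the same encodings select
 * SPL/BPL/SIL/DIL via aGRegs[iReg].u8 instead. */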
5788
5789
5790/**
5791 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5792 *
5793 * @returns Register reference.
5794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5795 * @param iReg The register.
5796 */
5797DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5798{
5799 Assert(iReg < 16);
5800 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5801 return &pCtx->aGRegs[iReg].u16;
5802}
5803
5804
5805/**
5806 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5807 *
5808 * @returns Register reference.
5809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5810 * @param iReg The register.
5811 */
5812DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5813{
5814 Assert(iReg < 16);
5815 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5816 return &pCtx->aGRegs[iReg].u32;
5817}
5818
5819
5820/**
5821 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5822 *
5823 * @returns Register reference.
5824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5825 * @param iReg The register.
5826 */
5827DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5828{
5829 Assert(iReg < 16);
5830 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5831 return &pCtx->aGRegs[iReg].u64;
5832}
5833
5834
5835/**
5836 * Fetches the value of an 8-bit general purpose register.
5837 *
5838 * @returns The register value.
5839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5840 * @param iReg The register.
5841 */
5842DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5843{
5844 return *iemGRegRefU8(pVCpu, iReg);
5845}
5846
5847
5848/**
5849 * Fetches the value of a 16-bit general purpose register.
5850 *
5851 * @returns The register value.
5852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5853 * @param iReg The register.
5854 */
5855DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5856{
5857 Assert(iReg < 16);
5858 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5859}
5860
5861
5862/**
5863 * Fetches the value of a 32-bit general purpose register.
5864 *
5865 * @returns The register value.
5866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5867 * @param iReg The register.
5868 */
5869DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5870{
5871 Assert(iReg < 16);
5872 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5873}
5874
5875
5876/**
5877 * Fetches the value of a 64-bit general purpose register.
5878 *
5879 * @returns The register value.
5880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5881 * @param iReg The register.
5882 */
5883DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5884{
5885 Assert(iReg < 16);
5886 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5887}
5888
5889
5890/**
5891 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5892 *
5893 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5894 * segment limit.
5895 *
5896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5897 * @param offNextInstr The offset of the next instruction.
5898 */
5899IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5900{
5901 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5902 switch (pVCpu->iem.s.enmEffOpSize)
5903 {
5904 case IEMMODE_16BIT:
5905 {
5906 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5907 if ( uNewIp > pCtx->cs.u32Limit
5908 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5909 return iemRaiseGeneralProtectionFault0(pVCpu);
5910 pCtx->rip = uNewIp;
5911 break;
5912 }
5913
5914 case IEMMODE_32BIT:
5915 {
5916 Assert(pCtx->rip <= UINT32_MAX);
5917 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5918
5919 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5920 if (uNewEip > pCtx->cs.u32Limit)
5921 return iemRaiseGeneralProtectionFault0(pVCpu);
5922 pCtx->rip = uNewEip;
5923 break;
5924 }
5925
5926 case IEMMODE_64BIT:
5927 {
5928 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5929
5930 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5931 if (!IEM_IS_CANONICAL(uNewRip))
5932 return iemRaiseGeneralProtectionFault0(pVCpu);
5933 pCtx->rip = uNewRip;
5934 break;
5935 }
5936
5937 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5938 }
5939
5940 pCtx->eflags.Bits.u1RF = 0;
5941
5942#ifndef IEM_WITH_CODE_TLB
5943 /* Flush the prefetch buffer. */
5944 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5945#endif
5946
5947 return VINF_SUCCESS;
5948}
5949
5950
5951/**
5952 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5953 *
5954 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5955 * segment limit.
5956 *
5957 * @returns Strict VBox status code.
5958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5959 * @param offNextInstr The offset of the next instruction.
5960 */
5961IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5962{
5963 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5964 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5965
5966 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5967 if ( uNewIp > pCtx->cs.u32Limit
5968 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5969 return iemRaiseGeneralProtectionFault0(pVCpu);
5970 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5971 pCtx->rip = uNewIp;
5972 pCtx->eflags.Bits.u1RF = 0;
5973
5974#ifndef IEM_WITH_CODE_TLB
5975 /* Flush the prefetch buffer. */
5976 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5977#endif
5978
5979 return VINF_SUCCESS;
5980}
5981
5982
5983/**
5984 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5985 *
5986 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5987 * segment limit.
5988 *
5989 * @returns Strict VBox status code.
5990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5991 * @param offNextInstr The offset of the next instruction.
5992 */
5993IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
5994{
5995 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5996 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
5997
5998 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
5999 {
6000 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6001
6002 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6003 if (uNewEip > pCtx->cs.u32Limit)
6004 return iemRaiseGeneralProtectionFault0(pVCpu);
6005 pCtx->rip = uNewEip;
6006 }
6007 else
6008 {
6009 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6010
6011 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6012 if (!IEM_IS_CANONICAL(uNewRip))
6013 return iemRaiseGeneralProtectionFault0(pVCpu);
6014 pCtx->rip = uNewRip;
6015 }
6016 pCtx->eflags.Bits.u1RF = 0;
6017
6018#ifndef IEM_WITH_CODE_TLB
6019 /* Flush the prefetch buffer. */
6020 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6021#endif
6022
6023 return VINF_SUCCESS;
6024}
6025
6026
6027/**
6028 * Performs a near jump to the specified address.
6029 *
6030 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6031 * segment limit.
6032 *
6033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6034 * @param uNewRip The new RIP value.
6035 */
6036IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6037{
6038 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6039 switch (pVCpu->iem.s.enmEffOpSize)
6040 {
6041 case IEMMODE_16BIT:
6042 {
6043 Assert(uNewRip <= UINT16_MAX);
6044 if ( uNewRip > pCtx->cs.u32Limit
6045 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6046 return iemRaiseGeneralProtectionFault0(pVCpu);
6047 /** @todo Test 16-bit jump in 64-bit mode. */
6048 pCtx->rip = uNewRip;
6049 break;
6050 }
6051
6052 case IEMMODE_32BIT:
6053 {
6054 Assert(uNewRip <= UINT32_MAX);
6055 Assert(pCtx->rip <= UINT32_MAX);
6056 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6057
6058 if (uNewRip > pCtx->cs.u32Limit)
6059 return iemRaiseGeneralProtectionFault0(pVCpu);
6060 pCtx->rip = uNewRip;
6061 break;
6062 }
6063
6064 case IEMMODE_64BIT:
6065 {
6066 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6067
6068 if (!IEM_IS_CANONICAL(uNewRip))
6069 return iemRaiseGeneralProtectionFault0(pVCpu);
6070 pCtx->rip = uNewRip;
6071 break;
6072 }
6073
6074 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6075 }
6076
6077 pCtx->eflags.Bits.u1RF = 0;
6078
6079#ifndef IEM_WITH_CODE_TLB
6080 /* Flush the prefetch buffer. */
6081 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6082#endif
6083
6084 return VINF_SUCCESS;
6085}
6086
6087
6088/**
6089 * Get the address of the top of the stack.
6090 *
6091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6092 * @param pCtx The CPU context whose SP/ESP/RSP should be
6093 * read.
6094 */
6095DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6096{
6097 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6098 return pCtx->rsp;
6099 if (pCtx->ss.Attr.n.u1DefBig)
6100 return pCtx->esp;
6101 return pCtx->sp;
6102}
6103
6104
6105/**
6106 * Updates the RIP/EIP/IP to point to the next instruction.
6107 *
6108 * This function leaves the EFLAGS.RF flag alone.
6109 *
6110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6111 * @param cbInstr The number of bytes to add.
6112 */
6113IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6114{
6115 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6116 switch (pVCpu->iem.s.enmCpuMode)
6117 {
6118 case IEMMODE_16BIT:
6119 Assert(pCtx->rip <= UINT16_MAX);
6120 pCtx->eip += cbInstr;
6121 pCtx->eip &= UINT32_C(0xffff);
6122 break;
6123
6124 case IEMMODE_32BIT:
6125 pCtx->eip += cbInstr;
6126 Assert(pCtx->rip <= UINT32_MAX);
6127 break;
6128
6129 case IEMMODE_64BIT:
6130 pCtx->rip += cbInstr;
6131 break;
6132 default: AssertFailed();
6133 }
6134}
6135
6136
6137#if 0
6138/**
6139 * Updates the RIP/EIP/IP to point to the next instruction.
6140 *
6141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6142 */
6143IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6144{
6145 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6146}
6147#endif
6148
6149
6150
6151/**
6152 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6153 *
6154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6155 * @param cbInstr The number of bytes to add.
6156 */
6157IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6158{
6159 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6160
6161 pCtx->eflags.Bits.u1RF = 0;
6162
6163 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6164#if ARCH_BITS >= 64
6165 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6166 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6167 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6168#else
6169 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6170 pCtx->rip += cbInstr;
6171 else
6172 {
6173 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6174 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6175 }
6176#endif
6177}
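
/* Informal example of the masking above: in 16-bit mode ip=0xffff with
 * cbInstr=2 wraps to 0x0001, in 32-bit mode eip=0xffffffff wraps to
 * 0x00000001, while 64-bit mode keeps the full 64-bit result. */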
6178
6179
6180/**
6181 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6182 *
6183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6184 */
6185IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6186{
6187 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6188}
6189
6190
6191/**
6192 * Adds to the stack pointer.
6193 *
6194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6195 * @param pCtx The CPU context whose SP/ESP/RSP should be
6196 * updated.
6197 * @param cbToAdd The number of bytes to add (8-bit!).
6198 */
6199DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6200{
6201 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6202 pCtx->rsp += cbToAdd;
6203 else if (pCtx->ss.Attr.n.u1DefBig)
6204 pCtx->esp += cbToAdd;
6205 else
6206 pCtx->sp += cbToAdd;
6207}
6208
6209
6210/**
6211 * Subtracts from the stack pointer.
6212 *
6213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6214 * @param pCtx The CPU context whose SP/ESP/RSP should be
6215 * updated.
6216 * @param cbToSub The number of bytes to subtract (8-bit!).
6217 */
6218DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6219{
6220 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6221 pCtx->rsp -= cbToSub;
6222 else if (pCtx->ss.Attr.n.u1DefBig)
6223 pCtx->esp -= cbToSub;
6224 else
6225 pCtx->sp -= cbToSub;
6226}
6227
6228
6229/**
6230 * Adds to the temporary stack pointer.
6231 *
6232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6233 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6234 * @param cbToAdd The number of bytes to add (16-bit).
6235 * @param pCtx Where to get the current stack mode.
6236 */
6237DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6238{
6239 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6240 pTmpRsp->u += cbToAdd;
6241 else if (pCtx->ss.Attr.n.u1DefBig)
6242 pTmpRsp->DWords.dw0 += cbToAdd;
6243 else
6244 pTmpRsp->Words.w0 += cbToAdd;
6245}
6246
6247
6248/**
6249 * Subtracts from the temporary stack pointer.
6250 *
6251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6252 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6253 * @param cbToSub The number of bytes to subtract.
6254 * @param pCtx Where to get the current stack mode.
6255 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6256 * expecting that.
6257 */
6258DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6259{
6260 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6261 pTmpRsp->u -= cbToSub;
6262 else if (pCtx->ss.Attr.n.u1DefBig)
6263 pTmpRsp->DWords.dw0 -= cbToSub;
6264 else
6265 pTmpRsp->Words.w0 -= cbToSub;
6266}
6267
6268
6269/**
6270 * Calculates the effective stack address for a push of the specified size as
6271 * well as the new RSP value (upper bits may be masked).
6272 *
6273 * @returns Effective stack address for the push.
6274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6275 * @param pCtx Where to get the current stack mode.
6276 * @param cbItem The size of the stack item to push.
6277 * @param puNewRsp Where to return the new RSP value.
6278 */
6279DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6280{
6281 RTUINT64U uTmpRsp;
6282 RTGCPTR GCPtrTop;
6283 uTmpRsp.u = pCtx->rsp;
6284
6285 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6286 GCPtrTop = uTmpRsp.u -= cbItem;
6287 else if (pCtx->ss.Attr.n.u1DefBig)
6288 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6289 else
6290 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6291 *puNewRsp = uTmpRsp.u;
6292 return GCPtrTop;
6293}
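/*
 * Usage sketch (hypothetical, for illustration only): the push helpers return
 * the effective address and the would-be RSP without touching the context, so
 * a caller can perform the memory write first and only commit RSP once the
 * write has succeeded:
 *
 *     uint64_t uNewRsp;
 *     RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, sizeof(uint32_t), &uNewRsp);
 *     rcStrict = ...store the dword at SS:GCPtrTop...;
 *     if (rcStrict == VINF_SUCCESS)
 *         pCtx->rsp = uNewRsp; // commit the new stack pointer only on success
 */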
6294
6295
6296/**
6297 * Gets the current stack pointer and calculates the value after a pop of the
6298 * specified size.
6299 *
6300 * @returns Current stack pointer.
6301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6302 * @param pCtx Where to get the current stack mode.
6303 * @param cbItem The size of the stack item to pop.
6304 * @param puNewRsp Where to return the new RSP value.
6305 */
6306DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6307{
6308 RTUINT64U uTmpRsp;
6309 RTGCPTR GCPtrTop;
6310 uTmpRsp.u = pCtx->rsp;
6311
6312 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6313 {
6314 GCPtrTop = uTmpRsp.u;
6315 uTmpRsp.u += cbItem;
6316 }
6317 else if (pCtx->ss.Attr.n.u1DefBig)
6318 {
6319 GCPtrTop = uTmpRsp.DWords.dw0;
6320 uTmpRsp.DWords.dw0 += cbItem;
6321 }
6322 else
6323 {
6324 GCPtrTop = uTmpRsp.Words.w0;
6325 uTmpRsp.Words.w0 += cbItem;
6326 }
6327 *puNewRsp = uTmpRsp.u;
6328 return GCPtrTop;
6329}
6330
6331
6332/**
6333 * Calculates the effective stack address for a push of the specified size as
6334 * well as the new temporary RSP value (upper bits may be masked).
6335 *
6336 * @returns Effective stack address for the push.
6337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6338 * @param pCtx Where to get the current stack mode.
6339 * @param pTmpRsp The temporary stack pointer. This is updated.
6340 * @param cbItem The size of the stack item to push.
6341 */
6342DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6343{
6344 RTGCPTR GCPtrTop;
6345
6346 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6347 GCPtrTop = pTmpRsp->u -= cbItem;
6348 else if (pCtx->ss.Attr.n.u1DefBig)
6349 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6350 else
6351 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6352 return GCPtrTop;
6353}
6354
6355
6356/**
6357 * Gets the effective stack address for a pop of the specified size and
6358 * calculates and updates the temporary RSP.
6359 *
6360 * @returns Current stack pointer.
6361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6362 * @param pCtx Where to get the current stack mode.
6363 * @param pTmpRsp The temporary stack pointer. This is updated.
6364 * @param cbItem The size of the stack item to pop.
6365 */
6366DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6367{
6368 RTGCPTR GCPtrTop;
6369 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6370 {
6371 GCPtrTop = pTmpRsp->u;
6372 pTmpRsp->u += cbItem;
6373 }
6374 else if (pCtx->ss.Attr.n.u1DefBig)
6375 {
6376 GCPtrTop = pTmpRsp->DWords.dw0;
6377 pTmpRsp->DWords.dw0 += cbItem;
6378 }
6379 else
6380 {
6381 GCPtrTop = pTmpRsp->Words.w0;
6382 pTmpRsp->Words.w0 += cbItem;
6383 }
6384 return GCPtrTop;
6385}
6386
6387/** @} */
6388
6389
6390/** @name FPU access and helpers.
6391 *
6392 * @{
6393 */
6394
6395
6396/**
6397 * Hook for preparing to use the host FPU.
6398 *
6399 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6400 *
6401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6402 */
6403DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6404{
6405#ifdef IN_RING3
6406 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6407#else
6408 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6409#endif
6410}
6411
6412
6413/**
6414 * Hook for preparing to use the host FPU for SSE instructions.
6415 *
6416 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6417 *
6418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6419 */
6420DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6421{
6422 iemFpuPrepareUsage(pVCpu);
6423}
6424
6425
6426/**
6427 * Hook for actualizing the guest FPU state before the interpreter reads it.
6428 *
6429 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6430 *
6431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6432 */
6433DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6434{
6435#ifdef IN_RING3
6436 NOREF(pVCpu);
6437#else
6438 CPUMRZFpuStateActualizeForRead(pVCpu);
6439#endif
6440}
6441
6442
6443/**
6444 * Hook for actualizing the guest FPU state before the interpreter changes it.
6445 *
6446 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6447 *
6448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6449 */
6450DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6451{
6452#ifdef IN_RING3
6453 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6454#else
6455 CPUMRZFpuStateActualizeForChange(pVCpu);
6456#endif
6457}
6458
6459
6460/**
6461 * Hook for actualizing the guest XMM0..15 register state for read only.
6462 *
6463 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6464 *
6465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6466 */
6467DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6468{
6469#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6470 NOREF(pVCpu);
6471#else
6472 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6473#endif
6474}
6475
6476
6477/**
6478 * Hook for actualizing the guest XMM0..15 register state for read+write.
6479 *
6480 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6481 *
6482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6483 */
6484DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6485{
6486#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6487 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6488#else
6489 CPUMRZFpuStateActualizeForChange(pVCpu);
6490#endif
6491}
6492
6493
6494/**
6495 * Stores a QNaN value into a FPU register.
6496 *
6497 * @param pReg Pointer to the register.
6498 */
6499DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6500{
6501 pReg->au32[0] = UINT32_C(0x00000000);
6502 pReg->au32[1] = UINT32_C(0xc0000000);
6503 pReg->au16[4] = UINT16_C(0xffff);
6504}
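/* The bit pattern stored above is the x87 "QNaN floating-point indefinite":
   sign=1, exponent=0x7fff, significand=0xc000000000000000 - the same value
   the FPU itself produces as the masked response to invalid operations. */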
6505
6506
6507/**
6508 * Updates the FOP, FPU.CS and FPUIP registers.
6509 *
6510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6511 * @param pCtx The CPU context.
6512 * @param pFpuCtx The FPU context.
6513 */
6514DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6515{
6516 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6517 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6518 /** @todo x87.CS and FPUIP need to be kept separately. */
6519 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6520 {
6521 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
6522 * are handled in real mode, based on the fnsave and fnstenv images. */
6523 pFpuCtx->CS = 0;
6524 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6525 }
6526 else
6527 {
6528 pFpuCtx->CS = pCtx->cs.Sel;
6529 pFpuCtx->FPUIP = pCtx->rip;
6530 }
6531}
6532
6533
6534/**
6535 * Updates the x87.DS and FPUDP registers.
6536 *
6537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6538 * @param pCtx The CPU context.
6539 * @param pFpuCtx The FPU context.
6540 * @param iEffSeg The effective segment register.
6541 * @param GCPtrEff The effective address relative to @a iEffSeg.
6542 */
6543DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6544{
6545 RTSEL sel;
6546 switch (iEffSeg)
6547 {
6548 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6549 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6550 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6551 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6552 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6553 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6554 default:
6555 AssertMsgFailed(("%d\n", iEffSeg));
6556 sel = pCtx->ds.Sel;
6557 }
6558 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6559 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6560 {
6561 pFpuCtx->DS = 0;
6562 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6563 }
6564 else
6565 {
6566 pFpuCtx->DS = sel;
6567 pFpuCtx->FPUDP = GCPtrEff;
6568 }
6569}
6570
6571
6572/**
6573 * Rotates the stack registers in the push direction.
6574 *
6575 * @param pFpuCtx The FPU context.
6576 * @remarks This is a complete waste of time, but fxsave stores the registers in
6577 * stack order.
6578 */
6579DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6580{
6581 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6582 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6583 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6584 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6585 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6586 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6587 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6588 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6589 pFpuCtx->aRegs[0].r80 = r80Tmp;
6590}
6591
6592
6593/**
6594 * Rotates the stack registers in the pop direction.
6595 *
6596 * @param pFpuCtx The FPU context.
6597 * @remarks This is a complete waste of time, but fxsave stores the registers in
6598 * stack order.
6599 */
6600DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6601{
6602 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6603 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6604 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6605 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6606 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6607 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6608 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6609 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6610 pFpuCtx->aRegs[7].r80 = r80Tmp;
6611}
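/* Why the rotation is needed at all: aRegs[] is kept in ST-relative order so
   that aRegs[0] is always ST(0) - the stack order fxsave uses, per the remarks
   above - while FSW.TOP and FTW refer to physical register numbers. So every
   time TOP moves, the register array has to be rotated to keep aRegs[i] equal
   to ST(i). */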
6612
6613
6614/**
6615 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6616 * exception prevents it.
6617 *
6618 * @param pResult The FPU operation result to push.
6619 * @param pFpuCtx The FPU context.
6620 */
6621IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6622{
6623 /* Update FSW and bail if there are pending exceptions afterwards. */
6624 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6625 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6626 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6627 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6628 {
6629 pFpuCtx->FSW = fFsw;
6630 return;
6631 }
6632
6633 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6634 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6635 {
6636 /* All is fine, push the actual value. */
6637 pFpuCtx->FTW |= RT_BIT(iNewTop);
6638 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6639 }
6640 else if (pFpuCtx->FCW & X86_FCW_IM)
6641 {
6642 /* Masked stack overflow, push QNaN. */
6643 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6644 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6645 }
6646 else
6647 {
6648 /* Raise stack overflow, don't push anything. */
6649 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6650 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6651 return;
6652 }
6653
6654 fFsw &= ~X86_FSW_TOP_MASK;
6655 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6656 pFpuCtx->FSW = fFsw;
6657
6658 iemFpuRotateStackPush(pFpuCtx);
6659}
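/* Arithmetic note: (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK is a
   modulo-8 decrement of TOP (adding 7 equals subtracting 1 mod 8). E.g. TOP=0
   yields iNewTop=7 and TOP=3 yields 2; iNewTop is the physical register that
   becomes ST(0) once the push completes. */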
6660
6661
6662/**
6663 * Stores a result in a FPU register and updates the FSW and FTW.
6664 *
6665 * @param pFpuCtx The FPU context.
6666 * @param pResult The result to store.
6667 * @param iStReg Which FPU register to store it in.
6668 */
6669IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6670{
6671 Assert(iStReg < 8);
6672 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6673 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6674 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6675 pFpuCtx->FTW |= RT_BIT(iReg);
6676 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6677}
6678
6679
6680/**
6681 * Only updates the FPU status word (FSW) with the result of the current
6682 * instruction.
6683 *
6684 * @param pFpuCtx The FPU context.
6685 * @param u16FSW The FSW output of the current instruction.
6686 */
6687IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6688{
6689 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6690 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6691}
6692
6693
6694/**
6695 * Pops one item off the FPU stack if no pending exception prevents it.
6696 *
6697 * @param pFpuCtx The FPU context.
6698 */
6699IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6700{
6701 /* Check pending exceptions. */
6702 uint16_t uFSW = pFpuCtx->FSW;
6703 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6704 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6705 return;
6706
6707 /* TOP--. */
6708 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6709 uFSW &= ~X86_FSW_TOP_MASK;
6710 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6711 pFpuCtx->FSW = uFSW;
6712
6713 /* Mark the previous ST0 as empty. */
6714 iOldTop >>= X86_FSW_TOP_SHIFT;
6715 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6716
6717 /* Rotate the registers. */
6718 iemFpuRotateStackPop(pFpuCtx);
6719}
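/* Arithmetic note: adding 9 << X86_FSW_TOP_SHIFT and masking with
   X86_FSW_TOP_MASK increments the 3-bit TOP field in place (9 equals 1 mod 8)
   without extracting it first; e.g. TOP=7 wraps around to 0. */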
6720
6721
6722/**
6723 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6724 *
6725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6726 * @param pResult The FPU operation result to push.
6727 */
6728IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6729{
6730 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6731 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6732 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6733 iemFpuMaybePushResult(pResult, pFpuCtx);
6734}
6735
6736
6737/**
6738 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6739 * and sets FPUDP and FPUDS.
6740 *
6741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6742 * @param pResult The FPU operation result to push.
6743 * @param iEffSeg The effective segment register.
6744 * @param GCPtrEff The effective address relative to @a iEffSeg.
6745 */
6746IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6747{
6748 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6749 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6750 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6751 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6752 iemFpuMaybePushResult(pResult, pFpuCtx);
6753}
6754
6755
6756/**
6757 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6758 * unless a pending exception prevents it.
6759 *
6760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6761 * @param pResult The FPU operation result to store and push.
6762 */
6763IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6764{
6765 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6766 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6767 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6768
6769 /* Update FSW and bail if there are pending exceptions afterwards. */
6770 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6771 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6772 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6773 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6774 {
6775 pFpuCtx->FSW = fFsw;
6776 return;
6777 }
6778
6779 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6780 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6781 {
6782 /* All is fine, push the actual value. */
6783 pFpuCtx->FTW |= RT_BIT(iNewTop);
6784 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6785 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6786 }
6787 else if (pFpuCtx->FCW & X86_FCW_IM)
6788 {
6789 /* Masked stack overflow, push QNaN. */
6790 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6791 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6792 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6793 }
6794 else
6795 {
6796 /* Raise stack overflow, don't push anything. */
6797 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6798 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6799 return;
6800 }
6801
6802 fFsw &= ~X86_FSW_TOP_MASK;
6803 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6804 pFpuCtx->FSW = fFsw;
6805
6806 iemFpuRotateStackPush(pFpuCtx);
6807}
6808
6809
6810/**
6811 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6812 * FOP.
6813 *
6814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6815 * @param pResult The result to store.
6816 * @param iStReg Which FPU register to store it in.
6817 */
6818IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6819{
6820 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6821 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6822 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6823 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6824}
6825
6826
6827/**
6828 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6829 * FOP, and then pops the stack.
6830 *
6831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6832 * @param pResult The result to store.
6833 * @param iStReg Which FPU register to store it in.
6834 */
6835IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6836{
6837 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6838 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6839 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6840 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6841 iemFpuMaybePopOne(pFpuCtx);
6842}
6843
6844
6845/**
6846 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6847 * FPUDP, and FPUDS.
6848 *
6849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6850 * @param pResult The result to store.
6851 * @param iStReg Which FPU register to store it in.
6852 * @param iEffSeg The effective memory operand selector register.
6853 * @param GCPtrEff The effective memory operand offset.
6854 */
6855IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6856 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6857{
6858 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6859 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6860 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6861 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6862 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6863}
6864
6865
6866/**
6867 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6868 * FPUDP, and FPUDS, and then pops the stack.
6869 *
6870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6871 * @param pResult The result to store.
6872 * @param iStReg Which FPU register to store it in.
6873 * @param iEffSeg The effective memory operand selector register.
6874 * @param GCPtrEff The effective memory operand offset.
6875 */
6876IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6877 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6878{
6879 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6880 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6881 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6882 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6883 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6884 iemFpuMaybePopOne(pFpuCtx);
6885}
6886
6887
6888/**
6889 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6890 *
6891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6892 */
6893IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6894{
6895 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6896 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6897 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6898}
6899
6900
6901/**
6902 * Marks the specified stack register as free (for FFREE).
6903 *
6904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6905 * @param iStReg The register to free.
6906 */
6907IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6908{
6909 Assert(iStReg < 8);
6910 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6911 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6912 pFpuCtx->FTW &= ~RT_BIT(iReg);
6913}
6914
6915
6916/**
6917 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6918 *
6919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6920 */
6921IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6922{
6923 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6924 uint16_t uFsw = pFpuCtx->FSW;
6925 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6926 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6927 uFsw &= ~X86_FSW_TOP_MASK;
6928 uFsw |= uTop;
6929 pFpuCtx->FSW = uFsw;
6930}
6931
6932
6933/**
6934 * Decrements FSW.TOP, i.e. pushes a new item onto the stack without storing anything.
6935 *
6936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6937 */
6938IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6939{
6940 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6941 uint16_t uFsw = pFpuCtx->FSW;
6942 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6943 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6944 uFsw &= ~X86_FSW_TOP_MASK;
6945 uFsw |= uTop;
6946 pFpuCtx->FSW = uFsw;
6947}
6948
6949
6950/**
6951 * Updates the FSW, FOP, FPUIP, and FPUCS.
6952 *
6953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6954 * @param u16FSW The FSW from the current instruction.
6955 */
6956IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6957{
6958 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6959 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6960 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6961 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6962}
6963
6964
6965/**
6966 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6967 *
6968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6969 * @param u16FSW The FSW from the current instruction.
6970 */
6971IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6972{
6973 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6974 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6975 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6976 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6977 iemFpuMaybePopOne(pFpuCtx);
6978}
6979
6980
6981/**
6982 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6983 *
6984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6985 * @param u16FSW The FSW from the current instruction.
6986 * @param iEffSeg The effective memory operand selector register.
6987 * @param GCPtrEff The effective memory operand offset.
6988 */
6989IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6990{
6991 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6992 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6993 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6994 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6995 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6996}
6997
6998
6999/**
7000 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7001 *
7002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7003 * @param u16FSW The FSW from the current instruction.
7004 */
7005IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7006{
7007 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7008 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7009 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7010 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7011 iemFpuMaybePopOne(pFpuCtx);
7012 iemFpuMaybePopOne(pFpuCtx);
7013}
7014
7015
7016/**
7017 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7018 *
7019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7020 * @param u16FSW The FSW from the current instruction.
7021 * @param iEffSeg The effective memory operand selector register.
7022 * @param GCPtrEff The effective memory operand offset.
7023 */
7024IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7025{
7026 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7027 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7028 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7029 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7030 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7031 iemFpuMaybePopOne(pFpuCtx);
7032}
7033
7034
7035/**
7036 * Worker routine for raising an FPU stack underflow exception.
7037 *
7038 * @param pFpuCtx The FPU context.
7039 * @param iStReg The stack register being accessed.
7040 */
7041IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7042{
7043 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7044 if (pFpuCtx->FCW & X86_FCW_IM)
7045 {
7046 /* Masked underflow. */
7047 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7048 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7049 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7050 if (iStReg != UINT8_MAX)
7051 {
7052 pFpuCtx->FTW |= RT_BIT(iReg);
7053 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7054 }
7055 }
7056 else
7057 {
7058 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7059 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7060 }
7061}
7062
7063
7064/**
7065 * Raises a FPU stack underflow exception.
7066 *
7067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7068 * @param iStReg The destination register that should be loaded
7069 * with QNaN if \#IS is not masked. Specify
7070 * UINT8_MAX if none (like for fcom).
7071 */
7072DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7073{
7074 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7075 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7076 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7077 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7078}
7079
7080
7081DECL_NO_INLINE(IEM_STATIC, void)
7082iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7083{
7084 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7085 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7086 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7087 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7088 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7089}
7090
7091
7092DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7093{
7094 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7095 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7096 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7097 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7098 iemFpuMaybePopOne(pFpuCtx);
7099}
7100
7101
7102DECL_NO_INLINE(IEM_STATIC, void)
7103iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7104{
7105 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7106 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7107 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7108 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7109 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7110 iemFpuMaybePopOne(pFpuCtx);
7111}
7112
7113
7114DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7115{
7116 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7117 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7118 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7119 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7120 iemFpuMaybePopOne(pFpuCtx);
7121 iemFpuMaybePopOne(pFpuCtx);
7122}
7123
7124
7125DECL_NO_INLINE(IEM_STATIC, void)
7126iemFpuStackPushUnderflow(PVMCPU pVCpu)
7127{
7128 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7129 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7130 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7131
7132 if (pFpuCtx->FCW & X86_FCW_IM)
7133 {
7134 /* Masked stack underflow - Push QNaN. */
7135 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7136 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7137 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7138 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7139 pFpuCtx->FTW |= RT_BIT(iNewTop);
7140 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7141 iemFpuRotateStackPush(pFpuCtx);
7142 }
7143 else
7144 {
7145 /* Exception pending - don't change TOP or the register stack. */
7146 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7147 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7148 }
7149}
7150
7151
7152DECL_NO_INLINE(IEM_STATIC, void)
7153iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7154{
7155 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7156 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7157 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7158
7159 if (pFpuCtx->FCW & X86_FCW_IM)
7160 {
7161 /* Masked stack underflow - Push QNaN. */
7162 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7163 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7164 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7165 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7166 pFpuCtx->FTW |= RT_BIT(iNewTop);
7167 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7168 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7169 iemFpuRotateStackPush(pFpuCtx);
7170 }
7171 else
7172 {
7173 /* Exception pending - don't change TOP or the register stack. */
7174 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7175 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7176 }
7177}
7178
7179
7180/**
7181 * Worker routine for raising an FPU stack overflow exception on a push.
7182 *
7183 * @param pFpuCtx The FPU context.
7184 */
7185IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7186{
7187 if (pFpuCtx->FCW & X86_FCW_IM)
7188 {
7189 /* Masked overflow. */
7190 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7191 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7192 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7193 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7194 pFpuCtx->FTW |= RT_BIT(iNewTop);
7195 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7196 iemFpuRotateStackPush(pFpuCtx);
7197 }
7198 else
7199 {
7200 /* Exception pending - don't change TOP or the register stack. */
7201 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7202 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7203 }
7204}
7205
7206
7207/**
7208 * Raises a FPU stack overflow exception on a push.
7209 *
7210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7211 */
7212DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7213{
7214 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7215 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7216 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7217 iemFpuStackPushOverflowOnly(pFpuCtx);
7218}
7219
7220
7221/**
7222 * Raises a FPU stack overflow exception on a push with a memory operand.
7223 *
7224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7225 * @param iEffSeg The effective memory operand selector register.
7226 * @param GCPtrEff The effective memory operand offset.
7227 */
7228DECL_NO_INLINE(IEM_STATIC, void)
7229iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7230{
7231 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7232 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7233 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7234 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7235 iemFpuStackPushOverflowOnly(pFpuCtx);
7236}
7237
7238
7239IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7240{
7241 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7242 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7243 if (pFpuCtx->FTW & RT_BIT(iReg))
7244 return VINF_SUCCESS;
7245 return VERR_NOT_FOUND;
7246}
7247
7248
7249IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7250{
7251 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7252 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7253 if (pFpuCtx->FTW & RT_BIT(iReg))
7254 {
7255 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7256 return VINF_SUCCESS;
7257 }
7258 return VERR_NOT_FOUND;
7259}
7260
7261
7262IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7263 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7264{
7265 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7266 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7267 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7268 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7269 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7270 {
7271 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7272 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7273 return VINF_SUCCESS;
7274 }
7275 return VERR_NOT_FOUND;
7276}
7277
7278
7279IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7280{
7281 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7282 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7283 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7284 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7285 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7286 {
7287 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7288 return VINF_SUCCESS;
7289 }
7290 return VERR_NOT_FOUND;
7291}
7292
7293
7294/**
7295 * Updates the FPU exception status after FCW is changed.
7296 *
7297 * @param pFpuCtx The FPU context.
7298 */
7299IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7300{
7301 uint16_t u16Fsw = pFpuCtx->FSW;
7302 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7303 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7304 else
7305 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7306 pFpuCtx->FSW = u16Fsw;
7307}
7308
7309
7310/**
7311 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7312 *
7313 * @returns The full FTW.
7314 * @param pFpuCtx The FPU context.
7315 */
7316IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7317{
7318 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7319 uint16_t u16Ftw = 0;
7320 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7321 for (unsigned iSt = 0; iSt < 8; iSt++)
7322 {
7323 unsigned const iReg = (iSt + iTop) & 7;
7324 if (!(u8Ftw & RT_BIT(iReg)))
7325 u16Ftw |= 3 << (iReg * 2); /* empty */
7326 else
7327 {
7328 uint16_t uTag;
7329 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7330 if (pr80Reg->s.uExponent == 0x7fff)
7331 uTag = 2; /* Exponent is all 1's => Special. */
7332 else if (pr80Reg->s.uExponent == 0x0000)
7333 {
7334 if (pr80Reg->s.u64Mantissa == 0x0000)
7335 uTag = 1; /* All bits are zero => Zero. */
7336 else
7337 uTag = 2; /* Must be special. */
7338 }
7339 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7340 uTag = 0; /* Valid. */
7341 else
7342 uTag = 2; /* Must be special. */
7343
7344 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7345 }
7346 }
7347
7348 return u16Ftw;
7349}
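/* Tag encoding used above (two bits per physical register): 0=valid, 1=zero,
   2=special (NaN, infinity, denormal, unsupported), 3=empty. Worked example:
   with TOP=6 and only ST(0) holding an ordinary finite value, physical
   register 6 gets tag 0 and the rest tag 3, giving a full FTW of 0xcfff. */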
7350
7351
7352/**
7353 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7354 *
7355 * @returns The compressed FTW.
7356 * @param u16FullFtw The full FTW to convert.
7357 */
7358IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7359{
7360 uint8_t u8Ftw = 0;
7361 for (unsigned i = 0; i < 8; i++)
7362 {
7363 if ((u16FullFtw & 3) != 3 /*empty*/)
7364 u8Ftw |= RT_BIT(i);
7365 u16FullFtw >>= 2;
7366 }
7367
7368 return u8Ftw;
7369}
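/* Continuing the example above: compressing 0xcfff yields 0x40 - one bit per
   physical register, set when that register is not empty, which is the format
   kept in the live FTW field. */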
7370
7371/** @} */
7372
7373
7374/** @name Memory access.
7375 *
7376 * @{
7377 */
7378
7379
7380/**
7381 * Updates the IEMCPU::cbWritten counter if applicable.
7382 *
7383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7384 * @param fAccess The access being accounted for.
7385 * @param cbMem The access size.
7386 */
7387DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7388{
7389 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7390 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7391 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7392}
7393
7394
7395/**
7396 * Checks if the given segment can be written to, raising the appropriate
7397 * exception if not.
7398 *
7399 * @returns VBox strict status code.
7400 *
7401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7402 * @param pHid Pointer to the hidden register.
7403 * @param iSegReg The register number.
7404 * @param pu64BaseAddr Where to return the base address to use for the
7405 * segment. (In 64-bit code it may differ from the
7406 * base in the hidden segment.)
7407 */
7408IEM_STATIC VBOXSTRICTRC
7409iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7410{
7411 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7412 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7413 else
7414 {
7415 if (!pHid->Attr.n.u1Present)
7416 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7417
7418 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7419 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7420 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7421 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7422 *pu64BaseAddr = pHid->u64Base;
7423 }
7424 return VINF_SUCCESS;
7425}
7426
7427
7428/**
7429 * Checks if the given segment can be read from, raising the appropriate
7430 * exception if not.
7431 *
7432 * @returns VBox strict status code.
7433 *
7434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7435 * @param pHid Pointer to the hidden register.
7436 * @param iSegReg The register number.
7437 * @param pu64BaseAddr Where to return the base address to use for the
7438 * segment. (In 64-bit code it may differ from the
7439 * base in the hidden segment.)
7440 */
7441IEM_STATIC VBOXSTRICTRC
7442iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7443{
7444 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7445 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7446 else
7447 {
7448 if (!pHid->Attr.n.u1Present)
7449 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7450
7451 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7452 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7453 *pu64BaseAddr = pHid->u64Base;
7454 }
7455 return VINF_SUCCESS;
7456}
7457
7458
7459/**
7460 * Applies the segment limit, base and attributes.
7461 *
7462 * This may raise a \#GP or \#SS.
7463 *
7464 * @returns VBox strict status code.
7465 *
7466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7467 * @param fAccess The kind of access which is being performed.
7468 * @param iSegReg The index of the segment register to apply.
7469 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7470 * TSS, ++).
7471 * @param cbMem The access size.
7472 * @param pGCPtrMem Pointer to the guest memory address to apply
7473 * segmentation to. Input and output parameter.
7474 */
7475IEM_STATIC VBOXSTRICTRC
7476iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7477{
7478 if (iSegReg == UINT8_MAX)
7479 return VINF_SUCCESS;
7480
7481 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7482 switch (pVCpu->iem.s.enmCpuMode)
7483 {
7484 case IEMMODE_16BIT:
7485 case IEMMODE_32BIT:
7486 {
7487 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7488 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7489
7490 if ( pSel->Attr.n.u1Present
7491 && !pSel->Attr.n.u1Unusable)
7492 {
7493 Assert(pSel->Attr.n.u1DescType);
7494 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7495 {
7496 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7497 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7498 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7499
7500 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7501 {
7502 /** @todo CPL check. */
7503 }
7504
7505 /*
7506 * There are two kinds of data selectors, normal and expand down.
7507 */
7508 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7509 {
7510 if ( GCPtrFirst32 > pSel->u32Limit
7511 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7512 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7513 }
7514 else
7515 {
7516 /*
7517 * The upper boundary is defined by the B bit, not the G bit!
7518 */
7519 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7520 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7521 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7522 }
7523 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7524 }
7525 else
7526 {
7527
7528 /*
7529 * Code selector and usually be used to read thru, writing is
7530 * only permitted in real and V8086 mode.
7531 */
7532 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7533 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7534 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7535 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7536 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7537
7538 if ( GCPtrFirst32 > pSel->u32Limit
7539 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7540 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7541
7542 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7543 {
7544 /** @todo CPL check. */
7545 }
7546
7547 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7548 }
7549 }
7550 else
7551 return iemRaiseGeneralProtectionFault0(pVCpu);
7552 return VINF_SUCCESS;
7553 }
7554
7555 case IEMMODE_64BIT:
7556 {
7557 RTGCPTR GCPtrMem = *pGCPtrMem;
7558 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7559 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7560
7561 Assert(cbMem >= 1);
7562 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7563 return VINF_SUCCESS;
7564 return iemRaiseGeneralProtectionFault0(pVCpu);
7565 }
7566
7567 default:
7568 AssertFailedReturn(VERR_IEM_IPE_7);
7569 }
7570}
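/* Expand-down example for the 16/32-bit path above: a 32-bit (D=1) expand-down
   data segment with limit 0x0fff accepts offsets 0x1000 through 0xffffffff.
   A dword access at 0x0ffe faults (its first byte lies below limit+1), while
   one at 0x2000 passes (its last byte, 0x2003, is within the upper bound). */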
7571
7572
7573/**
7574 * Translates a virtual address to a physical address and checks if we
7575 * can access the page as specified.
7576 *
7577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7578 * @param GCPtrMem The virtual address.
7579 * @param fAccess The intended access.
7580 * @param pGCPhysMem Where to return the physical address.
7581 */
7582IEM_STATIC VBOXSTRICTRC
7583iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7584{
7585 /** @todo Need a different PGM interface here. We're currently using
7586 * generic / REM interfaces. This won't cut it for R0 & RC. */
7587 RTGCPHYS GCPhys;
7588 uint64_t fFlags;
7589 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7590 if (RT_FAILURE(rc))
7591 {
7592 /** @todo Check unassigned memory in unpaged mode. */
7593 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7594 *pGCPhysMem = NIL_RTGCPHYS;
7595 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7596 }
7597
7598 /* If the page is writable and does not have the no-exec bit set, all
7599 access is allowed. Otherwise we'll have to check more carefully... */
7600 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7601 {
7602 /* Write to read only memory? */
7603 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7604 && !(fFlags & X86_PTE_RW)
7605 && ( pVCpu->iem.s.uCpl == 3
7606 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7607 {
7608 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7609 *pGCPhysMem = NIL_RTGCPHYS;
7610 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7611 }
7612
7613 /* Kernel memory accessed by userland? */
7614 if ( !(fFlags & X86_PTE_US)
7615 && pVCpu->iem.s.uCpl == 3
7616 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7617 {
7618 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7619 *pGCPhysMem = NIL_RTGCPHYS;
7620 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7621 }
7622
7623 /* Executing non-executable memory? */
7624 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7625 && (fFlags & X86_PTE_PAE_NX)
7626 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7627 {
7628 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7629 *pGCPhysMem = NIL_RTGCPHYS;
7630 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7631 VERR_ACCESS_DENIED);
7632 }
7633 }
7634
7635 /*
7636 * Set the dirty / access flags.
7637 * ASSUMES this is set when the address is translated rather than on commit...
7638 */
7639 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7640 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7641 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7642 {
7643 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7644 AssertRC(rc2);
7645 }
7646
7647 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7648 *pGCPhysMem = GCPhys;
7649 return VINF_SUCCESS;
7650}
7651
7652
7653
7654/**
7655 * Maps a physical page.
7656 *
7657 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7659 * @param GCPhysMem The physical address.
7660 * @param fAccess The intended access.
7661 * @param ppvMem Where to return the mapping address.
7662 * @param pLock The PGM lock.
7663 */
7664IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7665{
7666#ifdef IEM_VERIFICATION_MODE_FULL
7667 /* Force the alternative path so we can ignore writes. */
7668 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7669 {
7670 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7671 {
7672 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7673 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7674 if (RT_FAILURE(rc2))
7675 pVCpu->iem.s.fProblematicMemory = true;
7676 }
7677 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7678 }
7679#endif
7680#ifdef IEM_LOG_MEMORY_WRITES
7681 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7682 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7683#endif
7684#ifdef IEM_VERIFICATION_MODE_MINIMAL
7685 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7686#endif
7687
7688 /** @todo This API may require some improving later. A private deal with PGM
7689 * regarding locking and unlocking needs to be struck. A couple of TLBs
7690 * living in PGM, but with publicly accessible inlined access methods
7691 * could perhaps be an even better solution. */
7692 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7693 GCPhysMem,
7694 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7695 pVCpu->iem.s.fBypassHandlers,
7696 ppvMem,
7697 pLock);
7698 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7699 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7700
7701#ifdef IEM_VERIFICATION_MODE_FULL
7702 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7703 pVCpu->iem.s.fProblematicMemory = true;
7704#endif
7705 return rc;
7706}
7707
7708
7709/**
7710 * Unmaps a page previously mapped by iemMemPageMap.
7711 *
7712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7713 * @param GCPhysMem The physical address.
7714 * @param fAccess The intended access.
7715 * @param pvMem What iemMemPageMap returned.
7716 * @param pLock The PGM lock.
7717 */
7718DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7719{
7720 NOREF(pVCpu);
7721 NOREF(GCPhysMem);
7722 NOREF(fAccess);
7723 NOREF(pvMem);
7724 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7725}
7726
7727
7728/**
7729 * Looks up a memory mapping entry.
7730 *
7731 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7733 * @param pvMem The memory address.
7734 * @param fAccess The access flags of the mapping to look up.
7735 */
7736DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7737{
7738 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7739 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7740 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7741 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7742 return 0;
7743 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7744 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7745 return 1;
7746 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7747 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7748 return 2;
7749 return VERR_NOT_FOUND;
7750}
7751
7752
7753/**
7754 * Finds a free memmap entry when using iNextMapping doesn't work.
7755 *
7756 * @returns Memory mapping index, 1024 on failure.
7757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7758 */
7759IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7760{
7761 /*
7762 * The easy case.
7763 */
7764 if (pVCpu->iem.s.cActiveMappings == 0)
7765 {
7766 pVCpu->iem.s.iNextMapping = 1;
7767 return 0;
7768 }
7769
7770 /* There should be enough mappings for all instructions. */
7771 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7772
7773 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7774 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7775 return i;
7776
7777 AssertFailedReturn(1024);
7778}
7779
7780
7781/**
7782 * Commits a bounce buffer that needs writing back and unmaps it.
7783 *
7784 * @returns Strict VBox status code.
7785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7786 * @param iMemMap The index of the buffer to commit.
7787 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7788 * Always false in ring-3, obviously.
7789 */
7790IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7791{
7792 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7793 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7794#ifdef IN_RING3
7795 Assert(!fPostponeFail);
7796 RT_NOREF_PV(fPostponeFail);
7797#endif
7798
7799 /*
7800 * Do the writing.
7801 */
7802#ifndef IEM_VERIFICATION_MODE_MINIMAL
7803 PVM pVM = pVCpu->CTX_SUFF(pVM);
7804 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7805 && !IEM_VERIFICATION_ENABLED(pVCpu))
7806 {
7807 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7808 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7809 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7810 if (!pVCpu->iem.s.fBypassHandlers)
7811 {
7812 /*
7813 * Carefully and efficiently dealing with access handler return
7814 * codes makes this a little bloated.
7815 */
7816 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7817 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7818 pbBuf,
7819 cbFirst,
7820 PGMACCESSORIGIN_IEM);
7821 if (rcStrict == VINF_SUCCESS)
7822 {
7823 if (cbSecond)
7824 {
7825 rcStrict = PGMPhysWrite(pVM,
7826 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7827 pbBuf + cbFirst,
7828 cbSecond,
7829 PGMACCESSORIGIN_IEM);
7830 if (rcStrict == VINF_SUCCESS)
7831 { /* nothing */ }
7832 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7833 {
7834 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7835 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7836 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7837 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7838 }
7839# ifndef IN_RING3
7840 else if (fPostponeFail)
7841 {
7842 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7843 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7844 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7845 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7846 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7847 return iemSetPassUpStatus(pVCpu, rcStrict);
7848 }
7849# endif
7850 else
7851 {
7852 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7853 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7855 return rcStrict;
7856 }
7857 }
7858 }
7859 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7860 {
7861 if (!cbSecond)
7862 {
7863 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7865 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7866 }
7867 else
7868 {
7869 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7870 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7871 pbBuf + cbFirst,
7872 cbSecond,
7873 PGMACCESSORIGIN_IEM);
7874 if (rcStrict2 == VINF_SUCCESS)
7875 {
7876 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7877 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7879 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7880 }
7881 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7882 {
7883 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7885 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7886 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7887 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7888 }
7889# ifndef IN_RING3
7890 else if (fPostponeFail)
7891 {
7892 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7893 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7894 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7895 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7896 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7897 return iemSetPassUpStatus(pVCpu, rcStrict);
7898 }
7899# endif
7900 else
7901 {
7902 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7903 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7904 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7905 return rcStrict2;
7906 }
7907 }
7908 }
7909# ifndef IN_RING3
7910 else if (fPostponeFail)
7911 {
7912 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7913 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7914 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7915 if (!cbSecond)
7916 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7917 else
7918 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7919 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7920 return iemSetPassUpStatus(pVCpu, rcStrict);
7921 }
7922# endif
7923 else
7924 {
7925 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7926 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7927 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7928 return rcStrict;
7929 }
7930 }
7931 else
7932 {
7933 /*
7934 * No access handlers, much simpler.
7935 */
7936 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7937 if (RT_SUCCESS(rc))
7938 {
7939 if (cbSecond)
7940 {
7941 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7942 if (RT_SUCCESS(rc))
7943 { /* likely */ }
7944 else
7945 {
7946 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7947 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7948 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7949 return rc;
7950 }
7951 }
7952 }
7953 else
7954 {
7955 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7956 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7957 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7958 return rc;
7959 }
7960 }
7961 }
7962#endif
7963
7964#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7965 /*
7966 * Record the write(s).
7967 */
7968 if (!pVCpu->iem.s.fNoRem)
7969 {
7970 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7971 if (pEvtRec)
7972 {
7973 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7974 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7975 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7976 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7977 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7978 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7979 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7980 }
7981 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7982 {
7983 pEvtRec = iemVerifyAllocRecord(pVCpu);
7984 if (pEvtRec)
7985 {
7986 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7987 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7988 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7989 memcpy(pEvtRec->u.RamWrite.ab,
7990 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7991 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7992 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7993 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7994 }
7995 }
7996 }
7997#endif
7998#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
7999 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8000 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8001 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8002 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8003 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8004 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8005
8006 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8007 g_cbIemWrote = cbWrote;
8008 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8009#endif
8010
8011 /*
8012 * Free the mapping entry.
8013 */
8014 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8015 Assert(pVCpu->iem.s.cActiveMappings != 0);
8016 pVCpu->iem.s.cActiveMappings--;
8017 return VINF_SUCCESS;
8018}
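/**
 * Note on the postponed-write paths above (ring-0 and raw-mode only, and only
 * when the caller passed fPostponeFail): a first-page write that cannot be
 * completed is recorded as IEM_ACCESS_PENDING_R3_WRITE_1ST (plus ..._2ND when
 * the access spans a second page), while a failing second-page write is
 * recorded as ..._2ND alone.  In every such case VMCPU_FF_IEM is set, the
 * instruction is allowed to retire, and the bounce buffer contents are written
 * out once execution drops back to ring-3.
 */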
8019
8020
8021/**
8022 * iemMemMap worker that deals with a request crossing pages.
8023 */
8024IEM_STATIC VBOXSTRICTRC
8025iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8026{
8027 /*
8028 * Do the address translations.
8029 */
8030 RTGCPHYS GCPhysFirst;
8031 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8032 if (rcStrict != VINF_SUCCESS)
8033 return rcStrict;
8034
8035 RTGCPHYS GCPhysSecond;
8036 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8037 fAccess, &GCPhysSecond);
8038 if (rcStrict != VINF_SUCCESS)
8039 return rcStrict;
8040 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8041
8042 PVM pVM = pVCpu->CTX_SUFF(pVM);
8043#ifdef IEM_VERIFICATION_MODE_FULL
8044 /*
8045 * Detect problematic memory when verifying so we can select
8046 * the right execution engine. (TLB: Redo this.)
8047 */
8048 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8049 {
8050 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8051 if (RT_SUCCESS(rc2))
8052 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8053 if (RT_FAILURE(rc2))
8054 pVCpu->iem.s.fProblematicMemory = true;
8055 }
8056#endif
8057
8058
8059 /*
8060 * Read in the current memory content if it's a read, execute or partial
8061 * write access.
8062 */
8063 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8064 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8065 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8066
8067 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8068 {
8069 if (!pVCpu->iem.s.fBypassHandlers)
8070 {
8071 /*
8072 * Must carefully deal with access handler status codes here,
8073 * makes the code a bit bloated.
8074 */
8075 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8076 if (rcStrict == VINF_SUCCESS)
8077 {
8078 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8079 if (rcStrict == VINF_SUCCESS)
8080 { /*likely */ }
8081 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8082 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8083 else
8084 {
8085 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8086 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8087 return rcStrict;
8088 }
8089 }
8090 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8091 {
8092 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8093 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8094 {
8095 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8096 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8097 }
8098 else
8099 {
8100 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
 8101 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8102 return rcStrict2;
8103 }
8104 }
8105 else
8106 {
8107 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8108 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8109 return rcStrict;
8110 }
8111 }
8112 else
8113 {
8114 /*
 8115 * No informational status codes here, much more straightforward.
8116 */
8117 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8118 if (RT_SUCCESS(rc))
8119 {
8120 Assert(rc == VINF_SUCCESS);
8121 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8122 if (RT_SUCCESS(rc))
8123 Assert(rc == VINF_SUCCESS);
8124 else
8125 {
8126 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8127 return rc;
8128 }
8129 }
8130 else
8131 {
8132 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8133 return rc;
8134 }
8135 }
8136
8137#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8138 if ( !pVCpu->iem.s.fNoRem
8139 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8140 {
8141 /*
8142 * Record the reads.
8143 */
8144 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8145 if (pEvtRec)
8146 {
8147 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8148 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8149 pEvtRec->u.RamRead.cb = cbFirstPage;
8150 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8151 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8152 }
8153 pEvtRec = iemVerifyAllocRecord(pVCpu);
8154 if (pEvtRec)
8155 {
8156 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8157 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8158 pEvtRec->u.RamRead.cb = cbSecondPage;
8159 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8160 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8161 }
8162 }
8163#endif
8164 }
8165#ifdef VBOX_STRICT
8166 else
8167 memset(pbBuf, 0xcc, cbMem);
8168 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8169 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8170#endif
8171
8172 /*
8173 * Commit the bounce buffer entry.
8174 */
8175 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8176 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8177 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8178 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8179 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8180 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8181 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8182 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8183 pVCpu->iem.s.cActiveMappings++;
8184
8185 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8186 *ppvMem = pbBuf;
8187 return VINF_SUCCESS;
8188}
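/**
 * Worked example of the page split arithmetic above (illustrative only,
 * assuming the usual 4 KiB x86 page, i.e. PAGE_SIZE = 0x1000 and
 * PAGE_OFFSET_MASK = 0xfff):
 *
 * @code
 *  // An 8 byte access with GCPhysFirst = 0x12ffc crosses into the next page:
 *  uint32_t const cbMem        = 8;
 *  uint32_t const cbFirstPage  = 0x1000 - (0x12ffc & 0xfff);   // = 4
 *  uint32_t const cbSecondPage = cbMem - cbFirstPage;          // = 4
 *  // Bytes 0..3 go to GCPhysFirst and bytes 4..7 to GCPhysSecond (the
 *  // separately translated second page), staged through aBounceBuffers[iMemMap].
 * @endcode
 */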
8189
8190
8191/**
 8192 * iemMemMap worker that deals with iemMemPageMap failures.
8193 */
8194IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8195 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8196{
8197 /*
8198 * Filter out conditions we can handle and the ones which shouldn't happen.
8199 */
8200 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8201 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8202 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8203 {
8204 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8205 return rcMap;
8206 }
8207 pVCpu->iem.s.cPotentialExits++;
8208
8209 /*
8210 * Read in the current memory content if it's a read, execute or partial
8211 * write access.
8212 */
8213 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8214 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8215 {
8216 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8217 memset(pbBuf, 0xff, cbMem);
8218 else
8219 {
8220 int rc;
8221 if (!pVCpu->iem.s.fBypassHandlers)
8222 {
8223 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8224 if (rcStrict == VINF_SUCCESS)
8225 { /* nothing */ }
8226 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8227 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8228 else
8229 {
8230 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8231 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8232 return rcStrict;
8233 }
8234 }
8235 else
8236 {
8237 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8238 if (RT_SUCCESS(rc))
8239 { /* likely */ }
8240 else
8241 {
8242 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8243 GCPhysFirst, rc));
8244 return rc;
8245 }
8246 }
8247 }
8248
8249#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8250 if ( !pVCpu->iem.s.fNoRem
8251 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8252 {
8253 /*
8254 * Record the read.
8255 */
8256 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8257 if (pEvtRec)
8258 {
8259 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8260 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8261 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8262 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8263 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8264 }
8265 }
8266#endif
8267 }
8268#ifdef VBOX_STRICT
 8269 else
 8270 memset(pbBuf, 0xcc, cbMem);
 8273 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
 8274 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
 8275#endif
8276
8277 /*
8278 * Commit the bounce buffer entry.
8279 */
8280 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8281 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8282 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8283 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8284 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8285 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8286 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8287 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8288 pVCpu->iem.s.cActiveMappings++;
8289
8290 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8291 *ppvMem = pbBuf;
8292 return VINF_SUCCESS;
8293}
8294
8295
8296
8297/**
8298 * Maps the specified guest memory for the given kind of access.
8299 *
8300 * This may be using bounce buffering of the memory if it's crossing a page
8301 * boundary or if there is an access handler installed for any of it. Because
8302 * of lock prefix guarantees, we're in for some extra clutter when this
8303 * happens.
8304 *
8305 * This may raise a \#GP, \#SS, \#PF or \#AC.
8306 *
8307 * @returns VBox strict status code.
8308 *
8309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8310 * @param ppvMem Where to return the pointer to the mapped
8311 * memory.
8312 * @param cbMem The number of bytes to map. This is usually 1,
8313 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8314 * string operations it can be up to a page.
8315 * @param iSegReg The index of the segment register to use for
8316 * this access. The base and limits are checked.
8317 * Use UINT8_MAX to indicate that no segmentation
8318 * is required (for IDT, GDT and LDT accesses).
8319 * @param GCPtrMem The address of the guest memory.
8320 * @param fAccess How the memory is being accessed. The
8321 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8322 * how to map the memory, while the
8323 * IEM_ACCESS_WHAT_XXX bit is used when raising
8324 * exceptions.
8325 */
8326IEM_STATIC VBOXSTRICTRC
8327iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8328{
8329 /*
8330 * Check the input and figure out which mapping entry to use.
8331 */
8332 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8333 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8334 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8335
8336 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8337 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8338 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8339 {
8340 iMemMap = iemMemMapFindFree(pVCpu);
8341 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8342 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8343 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8344 pVCpu->iem.s.aMemMappings[2].fAccess),
8345 VERR_IEM_IPE_9);
8346 }
8347
8348 /*
8349 * Map the memory, checking that we can actually access it. If something
8350 * slightly complicated happens, fall back on bounce buffering.
8351 */
8352 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8353 if (rcStrict != VINF_SUCCESS)
8354 return rcStrict;
8355
8356 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8357 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8358
8359 RTGCPHYS GCPhysFirst;
8360 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8361 if (rcStrict != VINF_SUCCESS)
8362 return rcStrict;
8363
8364 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8365 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8366 if (fAccess & IEM_ACCESS_TYPE_READ)
8367 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8368
8369 void *pvMem;
8370 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8371 if (rcStrict != VINF_SUCCESS)
8372 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8373
8374 /*
8375 * Fill in the mapping table entry.
8376 */
8377 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8378 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8379 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8380 pVCpu->iem.s.cActiveMappings++;
8381
8382 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8383 *ppvMem = pvMem;
8384 return VINF_SUCCESS;
8385}
8386
8387
8388/**
8389 * Commits the guest memory if bounce buffered and unmaps it.
8390 *
8391 * @returns Strict VBox status code.
8392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8393 * @param pvMem The mapping.
8394 * @param fAccess The kind of access.
8395 */
8396IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8397{
8398 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8399 AssertReturn(iMemMap >= 0, iMemMap);
8400
8401 /* If it's bounce buffered, we may need to write back the buffer. */
8402 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8403 {
8404 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8405 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8406 }
8407 /* Otherwise unlock it. */
8408 else
8409 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8410
8411 /* Free the entry. */
8412 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8413 Assert(pVCpu->iem.s.cActiveMappings != 0);
8414 pVCpu->iem.s.cActiveMappings--;
8415 return VINF_SUCCESS;
8416}
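/**
 * Minimal sketch of the usual calling pattern for iemMemMap and
 * iemMemCommitAndUnmap, mirroring what the data fetch/store helpers further
 * down do (GCPtrMem and u16Value being the helper's parameters, and X86_SREG_DS
 * merely standing in for whatever segment the instruction actually uses):
 *
 * @code
 *  uint16_t    *pu16Dst;
 *  VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                    X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *  if (rcStrict == VINF_SUCCESS)
 *  {
 *      *pu16Dst = u16Value;    // access thru the mapping (or the bounce buffer)
 *      rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *  }
 *  return rcStrict;
 * @endcode
 */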
8417
8418#ifdef IEM_WITH_SETJMP
8419
8420/**
8421 * Maps the specified guest memory for the given kind of access, longjmp on
8422 * error.
8423 *
8424 * This may be using bounce buffering of the memory if it's crossing a page
8425 * boundary or if there is an access handler installed for any of it. Because
8426 * of lock prefix guarantees, we're in for some extra clutter when this
8427 * happens.
8428 *
8429 * This may raise a \#GP, \#SS, \#PF or \#AC.
8430 *
8431 * @returns Pointer to the mapped memory.
8432 *
8433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8434 * @param cbMem The number of bytes to map. This is usually 1,
8435 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8436 * string operations it can be up to a page.
8437 * @param iSegReg The index of the segment register to use for
8438 * this access. The base and limits are checked.
8439 * Use UINT8_MAX to indicate that no segmentation
8440 * is required (for IDT, GDT and LDT accesses).
8441 * @param GCPtrMem The address of the guest memory.
8442 * @param fAccess How the memory is being accessed. The
8443 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8444 * how to map the memory, while the
8445 * IEM_ACCESS_WHAT_XXX bit is used when raising
8446 * exceptions.
8447 */
8448IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8449{
8450 /*
8451 * Check the input and figure out which mapping entry to use.
8452 */
8453 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8454 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8455 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8456
8457 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8458 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8459 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8460 {
8461 iMemMap = iemMemMapFindFree(pVCpu);
8462 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8463 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8464 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8465 pVCpu->iem.s.aMemMappings[2].fAccess),
8466 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8467 }
8468
8469 /*
8470 * Map the memory, checking that we can actually access it. If something
8471 * slightly complicated happens, fall back on bounce buffering.
8472 */
8473 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8474 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8475 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8476
8477 /* Crossing a page boundary? */
8478 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8479 { /* No (likely). */ }
8480 else
8481 {
8482 void *pvMem;
8483 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8484 if (rcStrict == VINF_SUCCESS)
8485 return pvMem;
8486 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8487 }
8488
8489 RTGCPHYS GCPhysFirst;
8490 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8491 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8492 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8493
8494 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8495 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8496 if (fAccess & IEM_ACCESS_TYPE_READ)
8497 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8498
8499 void *pvMem;
8500 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8501 if (rcStrict == VINF_SUCCESS)
8502 { /* likely */ }
8503 else
8504 {
8505 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8506 if (rcStrict == VINF_SUCCESS)
8507 return pvMem;
8508 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8509 }
8510
8511 /*
8512 * Fill in the mapping table entry.
8513 */
8514 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8515 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8516 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8517 pVCpu->iem.s.cActiveMappings++;
8518
8519 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8520 return pvMem;
8521}
8522
8523
8524/**
8525 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8526 *
8527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8528 * @param pvMem The mapping.
8529 * @param fAccess The kind of access.
8530 */
8531IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8532{
8533 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8534 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8535
8536 /* If it's bounce buffered, we may need to write back the buffer. */
8537 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8538 {
8539 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8540 {
8541 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8542 if (rcStrict == VINF_SUCCESS)
8543 return;
8544 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8545 }
8546 }
8547 /* Otherwise unlock it. */
8548 else
8549 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8550
8551 /* Free the entry. */
8552 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8553 Assert(pVCpu->iem.s.cActiveMappings != 0);
8554 pVCpu->iem.s.cActiveMappings--;
8555}
8556
8557#endif
8558
8559#ifndef IN_RING3
8560/**
 8561 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 8562 * buffer part shows trouble, the write is postponed to ring-3 (VMCPU_FF_IEM is set).
8563 *
8564 * Allows the instruction to be completed and retired, while the IEM user will
8565 * return to ring-3 immediately afterwards and do the postponed writes there.
8566 *
8567 * @returns VBox status code (no strict statuses). Caller must check
8568 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8570 * @param pvMem The mapping.
8571 * @param fAccess The kind of access.
8572 */
8573IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8574{
8575 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8576 AssertReturn(iMemMap >= 0, iMemMap);
8577
8578 /* If it's bounce buffered, we may need to write back the buffer. */
8579 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8580 {
8581 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8582 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8583 }
8584 /* Otherwise unlock it. */
8585 else
8586 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8587
8588 /* Free the entry. */
8589 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8590 Assert(pVCpu->iem.s.cActiveMappings != 0);
8591 pVCpu->iem.s.cActiveMappings--;
8592 return VINF_SUCCESS;
8593}
8594#endif
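/**
 * Sketch of the caller side check described above; the exact force-flag test
 * macro is an assumption here (VMCPU_FF_IS_PENDING), the point being that a
 * string instruction must not be repeated while a write is still pending
 * (pvMem being the mapping obtained from iemMemMap):
 *
 * @code
 *  rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *  if (   rcStrict == VINF_SUCCESS
 *      && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
 *  {
 *      // no postponed write: safe to do the next iteration here
 *  }
 *  else
 *  {
 *      // stop; the caller returns to ring-3, which flushes the postponed write
 *  }
 * @endcode
 */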
8595
8596
8597/**
 8598 * Rolls back mappings, releasing page locks and such.
8599 *
8600 * The caller shall only call this after checking cActiveMappings.
8601 *
8603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8604 */
8605IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8606{
8607 Assert(pVCpu->iem.s.cActiveMappings > 0);
8608
8609 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8610 while (iMemMap-- > 0)
8611 {
8612 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8613 if (fAccess != IEM_ACCESS_INVALID)
8614 {
8615 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8616 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8617 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8618 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8619 Assert(pVCpu->iem.s.cActiveMappings > 0);
8620 pVCpu->iem.s.cActiveMappings--;
8621 }
8622 }
8623}
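/**
 * Typical use of the rollback helper on a failure path, honouring the
 * cActiveMappings precondition stated above (sketch only):
 *
 * @code
 *  if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *      iemMemRollback(pVCpu);  // drop mappings left behind by the failed instruction
 * @endcode
 */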
8624
8625
8626/**
8627 * Fetches a data byte.
8628 *
8629 * @returns Strict VBox status code.
8630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8631 * @param pu8Dst Where to return the byte.
8632 * @param iSegReg The index of the segment register to use for
8633 * this access. The base and limits are checked.
8634 * @param GCPtrMem The address of the guest memory.
8635 */
8636IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8637{
8638 /* The lazy approach for now... */
8639 uint8_t const *pu8Src;
8640 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8641 if (rc == VINF_SUCCESS)
8642 {
8643 *pu8Dst = *pu8Src;
8644 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8645 }
8646 return rc;
8647}
8648
8649
8650#ifdef IEM_WITH_SETJMP
8651/**
8652 * Fetches a data byte, longjmp on error.
8653 *
8654 * @returns The byte.
8655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8656 * @param iSegReg The index of the segment register to use for
8657 * this access. The base and limits are checked.
8658 * @param GCPtrMem The address of the guest memory.
8659 */
8660DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8661{
8662 /* The lazy approach for now... */
8663 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8664 uint8_t const bRet = *pu8Src;
8665 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8666 return bRet;
8667}
8668#endif /* IEM_WITH_SETJMP */
8669
8670
8671/**
8672 * Fetches a data word.
8673 *
8674 * @returns Strict VBox status code.
8675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8676 * @param pu16Dst Where to return the word.
8677 * @param iSegReg The index of the segment register to use for
8678 * this access. The base and limits are checked.
8679 * @param GCPtrMem The address of the guest memory.
8680 */
8681IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8682{
8683 /* The lazy approach for now... */
8684 uint16_t const *pu16Src;
8685 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8686 if (rc == VINF_SUCCESS)
8687 {
8688 *pu16Dst = *pu16Src;
8689 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8690 }
8691 return rc;
8692}
8693
8694
8695#ifdef IEM_WITH_SETJMP
8696/**
8697 * Fetches a data word, longjmp on error.
8698 *
8699 * @returns The word
8700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8701 * @param iSegReg The index of the segment register to use for
8702 * this access. The base and limits are checked.
8703 * @param GCPtrMem The address of the guest memory.
8704 */
8705DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8706{
8707 /* The lazy approach for now... */
8708 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8709 uint16_t const u16Ret = *pu16Src;
8710 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8711 return u16Ret;
8712}
8713#endif
8714
8715
8716/**
8717 * Fetches a data dword.
8718 *
8719 * @returns Strict VBox status code.
8720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8721 * @param pu32Dst Where to return the dword.
8722 * @param iSegReg The index of the segment register to use for
8723 * this access. The base and limits are checked.
8724 * @param GCPtrMem The address of the guest memory.
8725 */
8726IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8727{
8728 /* The lazy approach for now... */
8729 uint32_t const *pu32Src;
8730 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8731 if (rc == VINF_SUCCESS)
8732 {
8733 *pu32Dst = *pu32Src;
8734 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8735 }
8736 return rc;
8737}
8738
8739
8740#ifdef IEM_WITH_SETJMP
8741
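/**
 * Applies segmentation to a data read address for the setjmp/longjmp code
 * paths, raising the appropriate exception via longjmp on failure.
 *
 * @returns The segmented address with the base applied and limits checked.
 * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg  The index of the segment register to use for this access.
 * @param   cbMem    The number of bytes to access.
 * @param   GCPtrMem The segment relative address of the guest memory.
 */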
8742IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8743{
8744 Assert(cbMem >= 1);
8745 Assert(iSegReg < X86_SREG_COUNT);
8746
8747 /*
8748 * 64-bit mode is simpler.
8749 */
8750 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8751 {
8752 if (iSegReg >= X86_SREG_FS)
8753 {
8754 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8755 GCPtrMem += pSel->u64Base;
8756 }
8757
8758 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8759 return GCPtrMem;
8760 }
8761 /*
8762 * 16-bit and 32-bit segmentation.
8763 */
8764 else
8765 {
8766 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8767 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8768 == X86DESCATTR_P /* data, expand up */
8769 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8770 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8771 {
8772 /* expand up */
8773 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8774 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8775 && GCPtrLast32 > (uint32_t)GCPtrMem))
8776 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8777 }
8778 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8779 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8780 {
8781 /* expand down */
8782 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8783 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8784 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8785 && GCPtrLast32 > (uint32_t)GCPtrMem))
8786 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8787 }
8788 else
8789 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8790 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8791 }
8792 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8793}
8794
8795
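/**
 * Applies segmentation to a data write address for the setjmp/longjmp code
 * paths, raising the appropriate exception via longjmp on failure.
 *
 * @returns The segmented address with the base applied and limits checked.
 * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg  The index of the segment register to use for this access.
 * @param   cbMem    The number of bytes to access.
 * @param   GCPtrMem The segment relative address of the guest memory.
 */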
8796IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8797{
8798 Assert(cbMem >= 1);
8799 Assert(iSegReg < X86_SREG_COUNT);
8800
8801 /*
8802 * 64-bit mode is simpler.
8803 */
8804 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8805 {
8806 if (iSegReg >= X86_SREG_FS)
8807 {
8808 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8809 GCPtrMem += pSel->u64Base;
8810 }
8811
8812 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8813 return GCPtrMem;
8814 }
8815 /*
8816 * 16-bit and 32-bit segmentation.
8817 */
8818 else
8819 {
8820 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8821 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8822 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8823 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8824 {
8825 /* expand up */
8826 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8827 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
8828 && GCPtrLast32 > (uint32_t)GCPtrMem))
8829 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8830 }
 8831 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8832 {
8833 /* expand down */
8834 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8835 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8836 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8837 && GCPtrLast32 > (uint32_t)GCPtrMem))
8838 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8839 }
8840 else
8841 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8842 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8843 }
8844 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8845}
8846
8847
8848/**
8849 * Fetches a data dword, longjmp on error, fallback/safe version.
8850 *
8851 * @returns The dword
8852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8853 * @param iSegReg The index of the segment register to use for
8854 * this access. The base and limits are checked.
8855 * @param GCPtrMem The address of the guest memory.
8856 */
8857IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8858{
8859 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8860 uint32_t const u32Ret = *pu32Src;
8861 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8862 return u32Ret;
8863}
8864
8865
8866/**
8867 * Fetches a data dword, longjmp on error.
8868 *
8869 * @returns The dword
8870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8871 * @param iSegReg The index of the segment register to use for
8872 * this access. The base and limits are checked.
8873 * @param GCPtrMem The address of the guest memory.
8874 */
8875DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8876{
8877# ifdef IEM_WITH_DATA_TLB
8878 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8879 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8880 {
8881 /// @todo more later.
8882 }
8883
8884 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8885# else
8886 /* The lazy approach. */
8887 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8888 uint32_t const u32Ret = *pu32Src;
8889 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8890 return u32Ret;
8891# endif
8892}
8893#endif
8894
8895
8896#ifdef SOME_UNUSED_FUNCTION
8897/**
8898 * Fetches a data dword and sign extends it to a qword.
8899 *
8900 * @returns Strict VBox status code.
8901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8902 * @param pu64Dst Where to return the sign extended value.
8903 * @param iSegReg The index of the segment register to use for
8904 * this access. The base and limits are checked.
8905 * @param GCPtrMem The address of the guest memory.
8906 */
8907IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8908{
8909 /* The lazy approach for now... */
8910 int32_t const *pi32Src;
8911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8912 if (rc == VINF_SUCCESS)
8913 {
8914 *pu64Dst = *pi32Src;
8915 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8916 }
8917#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8918 else
8919 *pu64Dst = 0;
8920#endif
8921 return rc;
8922}
8923#endif
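/**
 * Worked example of the sign extension performed by the assignment above
 * (*pu64Dst = *pi32Src):
 *
 * @code
 *  int32_t  const i32 = -2;    // bit pattern 0xfffffffe
 *  uint64_t const u64 = i32;   // converts/sign-extends to 0xfffffffffffffffe
 * @endcode
 */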
8924
8925
8926/**
8927 * Fetches a data qword.
8928 *
8929 * @returns Strict VBox status code.
8930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8931 * @param pu64Dst Where to return the qword.
8932 * @param iSegReg The index of the segment register to use for
8933 * this access. The base and limits are checked.
8934 * @param GCPtrMem The address of the guest memory.
8935 */
8936IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8937{
8938 /* The lazy approach for now... */
8939 uint64_t const *pu64Src;
8940 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8941 if (rc == VINF_SUCCESS)
8942 {
8943 *pu64Dst = *pu64Src;
8944 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8945 }
8946 return rc;
8947}
8948
8949
8950#ifdef IEM_WITH_SETJMP
8951/**
8952 * Fetches a data qword, longjmp on error.
8953 *
8954 * @returns The qword.
8955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8956 * @param iSegReg The index of the segment register to use for
8957 * this access. The base and limits are checked.
8958 * @param GCPtrMem The address of the guest memory.
8959 */
8960DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8961{
8962 /* The lazy approach for now... */
8963 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8964 uint64_t const u64Ret = *pu64Src;
8965 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8966 return u64Ret;
8967}
8968#endif
8969
8970
8971/**
 8972 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8973 *
8974 * @returns Strict VBox status code.
8975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8976 * @param pu64Dst Where to return the qword.
8977 * @param iSegReg The index of the segment register to use for
8978 * this access. The base and limits are checked.
8979 * @param GCPtrMem The address of the guest memory.
8980 */
8981IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8982{
8983 /* The lazy approach for now... */
8984 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8985 if (RT_UNLIKELY(GCPtrMem & 15))
8986 return iemRaiseGeneralProtectionFault0(pVCpu);
8987
8988 uint64_t const *pu64Src;
8989 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8990 if (rc == VINF_SUCCESS)
8991 {
8992 *pu64Dst = *pu64Src;
8993 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8994 }
8995 return rc;
8996}
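/**
 * Example of the 16 byte alignment test above:
 *
 * @code
 *  // GCPtrMem = 0x1008:  0x1008 & 15 = 8  -> misaligned, #GP(0) is raised.
 *  // GCPtrMem = 0x1010:  0x1010 & 15 = 0  -> aligned, the qword is fetched.
 * @endcode
 */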
8997
8998
8999#ifdef IEM_WITH_SETJMP
9000/**
 9001 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9002 *
9003 * @returns The qword.
9004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9005 * @param iSegReg The index of the segment register to use for
9006 * this access. The base and limits are checked.
9007 * @param GCPtrMem The address of the guest memory.
9008 */
9009DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9010{
9011 /* The lazy approach for now... */
9012 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9013 if (RT_LIKELY(!(GCPtrMem & 15)))
9014 {
9015 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9016 uint64_t const u64Ret = *pu64Src;
9017 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9018 return u64Ret;
9019 }
9020
9021 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9022 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9023}
9024#endif
9025
9026
9027/**
9028 * Fetches a data tword.
9029 *
9030 * @returns Strict VBox status code.
9031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9032 * @param pr80Dst Where to return the tword.
9033 * @param iSegReg The index of the segment register to use for
9034 * this access. The base and limits are checked.
9035 * @param GCPtrMem The address of the guest memory.
9036 */
9037IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9038{
9039 /* The lazy approach for now... */
9040 PCRTFLOAT80U pr80Src;
9041 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9042 if (rc == VINF_SUCCESS)
9043 {
9044 *pr80Dst = *pr80Src;
9045 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9046 }
9047 return rc;
9048}
9049
9050
9051#ifdef IEM_WITH_SETJMP
9052/**
9053 * Fetches a data tword, longjmp on error.
9054 *
9055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9056 * @param pr80Dst Where to return the tword.
9057 * @param iSegReg The index of the segment register to use for
9058 * this access. The base and limits are checked.
9059 * @param GCPtrMem The address of the guest memory.
9060 */
9061DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9062{
9063 /* The lazy approach for now... */
9064 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9065 *pr80Dst = *pr80Src;
9066 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9067}
9068#endif
9069
9070
9071/**
9072 * Fetches a data dqword (double qword), generally SSE related.
9073 *
9074 * @returns Strict VBox status code.
9075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9076 * @param pu128Dst Where to return the dqword.
9077 * @param iSegReg The index of the segment register to use for
9078 * this access. The base and limits are checked.
9079 * @param GCPtrMem The address of the guest memory.
9080 */
9081IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9082{
9083 /* The lazy approach for now... */
9084 uint128_t const *pu128Src;
9085 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9086 if (rc == VINF_SUCCESS)
9087 {
9088 *pu128Dst = *pu128Src;
9089 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9090 }
9091 return rc;
9092}
9093
9094
9095#ifdef IEM_WITH_SETJMP
9096/**
 9097 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9098 *
9099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9100 * @param pu128Dst Where to return the dqword.
9101 * @param iSegReg The index of the segment register to use for
9102 * this access. The base and limits are checked.
9103 * @param GCPtrMem The address of the guest memory.
9104 */
9105IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9106{
9107 /* The lazy approach for now... */
9108 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9109 *pu128Dst = *pu128Src;
9110 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9111}
9112#endif
9113
9114
9115/**
9116 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9117 * related.
9118 *
9119 * Raises \#GP(0) if not aligned.
9120 *
9121 * @returns Strict VBox status code.
9122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9123 * @param pu128Dst Where to return the dqword.
9124 * @param iSegReg The index of the segment register to use for
9125 * this access. The base and limits are checked.
9126 * @param GCPtrMem The address of the guest memory.
9127 */
9128IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9129{
9130 /* The lazy approach for now... */
9131 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9132 if ( (GCPtrMem & 15)
9133 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9134 return iemRaiseGeneralProtectionFault0(pVCpu);
9135
9136 uint128_t const *pu128Src;
9137 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9138 if (rc == VINF_SUCCESS)
9139 {
9140 *pu128Dst = *pu128Src;
9141 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9142 }
9143 return rc;
9144}
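/**
 * Summary of the alignment handling above: a misaligned 128-bit access only
 * raises \#GP(0) when MXCSR.MM is clear; with MXCSR.MM set (AMD's misaligned
 * SSE mode) the access is allowed to proceed:
 *
 * @code
 *  // (GCPtrMem & 15) != 0 && MXCSR.MM == 0  -> #GP(0)
 *  // (GCPtrMem & 15) != 0 && MXCSR.MM == 1  -> access proceeds
 *  // (GCPtrMem & 15) == 0                   -> access proceeds
 * @endcode
 */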
9145
9146
9147#ifdef IEM_WITH_SETJMP
9148/**
9149 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9150 * related, longjmp on error.
9151 *
9152 * Raises \#GP(0) if not aligned.
9153 *
9154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9155 * @param pu128Dst Where to return the dqword.
9156 * @param iSegReg The index of the segment register to use for
9157 * this access. The base and limits are checked.
9158 * @param GCPtrMem The address of the guest memory.
9159 */
9160DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9161{
9162 /* The lazy approach for now... */
9163 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9164 if ( (GCPtrMem & 15) == 0
9165 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9166 {
9167 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9168 IEM_ACCESS_DATA_R);
9169 *pu128Dst = *pu128Src;
9170 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9171 return;
9172 }
9173
9174 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9175 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9176}
9177#endif
9178
9179
9180
9181/**
9182 * Fetches a descriptor register (lgdt, lidt).
9183 *
9184 * @returns Strict VBox status code.
9185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9186 * @param pcbLimit Where to return the limit.
9187 * @param pGCPtrBase Where to return the base.
9188 * @param iSegReg The index of the segment register to use for
9189 * this access. The base and limits are checked.
9190 * @param GCPtrMem The address of the guest memory.
9191 * @param enmOpSize The effective operand size.
9192 */
9193IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9194 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9195{
9196 /*
9197 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9198 * little special:
9199 * - The two reads are done separately.
 9200 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9201 * - We suspect the 386 to actually commit the limit before the base in
9202 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
 9203 * don't try to emulate this eccentric behavior, because it's not well
9204 * enough understood and rather hard to trigger.
9205 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9206 */
9207 VBOXSTRICTRC rcStrict;
9208 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9209 {
9210 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9211 if (rcStrict == VINF_SUCCESS)
9212 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9213 }
9214 else
9215 {
 9216 uint32_t uTmp = 0; /* (silences Visual C++'s maybe-used-uninitialized warning) */
9217 if (enmOpSize == IEMMODE_32BIT)
9218 {
9219 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9220 {
9221 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9222 if (rcStrict == VINF_SUCCESS)
9223 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9224 }
9225 else
9226 {
9227 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9228 if (rcStrict == VINF_SUCCESS)
9229 {
9230 *pcbLimit = (uint16_t)uTmp;
9231 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9232 }
9233 }
9234 if (rcStrict == VINF_SUCCESS)
9235 *pGCPtrBase = uTmp;
9236 }
9237 else
9238 {
9239 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9240 if (rcStrict == VINF_SUCCESS)
9241 {
9242 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9243 if (rcStrict == VINF_SUCCESS)
9244 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9245 }
9246 }
9247 }
9248 return rcStrict;
9249}
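/**
 * Illustrative sketch of the pseudo-descriptor layout the function above
 * fetches (pb is a hypothetical pointer to the guest bytes; the real code goes
 * through iemMemFetchDataU16/U32/U64 as shown):
 *
 * @code
 *  uint16_t const cbLimit = *(uint16_t const *)pb;                        // bytes 0..1
 *  // 16-bit operand size (outside long mode): only 24 bits of the base are used.
 *  uint32_t const uBase16 = *(uint32_t const *)(pb + 2) & UINT32_C(0x00ffffff);
 *  // 32-bit operand size: the full 32-bit base.
 *  uint32_t const uBase32 = *(uint32_t const *)(pb + 2);
 *  // 64-bit mode: a full 64-bit base follows the limit.
 *  uint64_t const uBase64 = *(uint64_t const *)(pb + 2);
 * @endcode
 */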
9250
9251
9252
9253/**
9254 * Stores a data byte.
9255 *
9256 * @returns Strict VBox status code.
9257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9258 * @param iSegReg The index of the segment register to use for
9259 * this access. The base and limits are checked.
9260 * @param GCPtrMem The address of the guest memory.
9261 * @param u8Value The value to store.
9262 */
9263IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9264{
9265 /* The lazy approach for now... */
9266 uint8_t *pu8Dst;
9267 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9268 if (rc == VINF_SUCCESS)
9269 {
9270 *pu8Dst = u8Value;
9271 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9272 }
9273 return rc;
9274}
9275
9276
9277#ifdef IEM_WITH_SETJMP
9278/**
9279 * Stores a data byte, longjmp on error.
9280 *
9281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9282 * @param iSegReg The index of the segment register to use for
9283 * this access. The base and limits are checked.
9284 * @param GCPtrMem The address of the guest memory.
9285 * @param u8Value The value to store.
9286 */
9287IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9288{
9289 /* The lazy approach for now... */
9290 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9291 *pu8Dst = u8Value;
9292 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9293}
9294#endif
9295
9296
9297/**
9298 * Stores a data word.
9299 *
9300 * @returns Strict VBox status code.
9301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9302 * @param iSegReg The index of the segment register to use for
9303 * this access. The base and limits are checked.
9304 * @param GCPtrMem The address of the guest memory.
9305 * @param u16Value The value to store.
9306 */
9307IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9308{
9309 /* The lazy approach for now... */
9310 uint16_t *pu16Dst;
9311 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9312 if (rc == VINF_SUCCESS)
9313 {
9314 *pu16Dst = u16Value;
9315 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9316 }
9317 return rc;
9318}
9319
9320
9321#ifdef IEM_WITH_SETJMP
9322/**
9323 * Stores a data word, longjmp on error.
9324 *
9325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9326 * @param iSegReg The index of the segment register to use for
9327 * this access. The base and limits are checked.
9328 * @param GCPtrMem The address of the guest memory.
9329 * @param u16Value The value to store.
9330 */
9331IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9332{
9333 /* The lazy approach for now... */
9334 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9335 *pu16Dst = u16Value;
9336 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9337}
9338#endif
9339
9340
9341/**
9342 * Stores a data dword.
9343 *
9344 * @returns Strict VBox status code.
9345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9346 * @param iSegReg The index of the segment register to use for
9347 * this access. The base and limits are checked.
9348 * @param GCPtrMem The address of the guest memory.
9349 * @param u32Value The value to store.
9350 */
9351IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9352{
9353 /* The lazy approach for now... */
9354 uint32_t *pu32Dst;
9355 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9356 if (rc == VINF_SUCCESS)
9357 {
9358 *pu32Dst = u32Value;
9359 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9360 }
9361 return rc;
9362}
9363
9364
9365#ifdef IEM_WITH_SETJMP
9366/**
 9367 * Stores a data dword, longjmp on error.
 9368 *
9370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9371 * @param iSegReg The index of the segment register to use for
9372 * this access. The base and limits are checked.
9373 * @param GCPtrMem The address of the guest memory.
9374 * @param u32Value The value to store.
9375 */
9376IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9377{
9378 /* The lazy approach for now... */
9379 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9380 *pu32Dst = u32Value;
9381 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9382}
9383#endif
9384
9385
9386/**
9387 * Stores a data qword.
9388 *
9389 * @returns Strict VBox status code.
9390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9391 * @param iSegReg The index of the segment register to use for
9392 * this access. The base and limits are checked.
9393 * @param GCPtrMem The address of the guest memory.
9394 * @param u64Value The value to store.
9395 */
9396IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9397{
9398 /* The lazy approach for now... */
9399 uint64_t *pu64Dst;
9400 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9401 if (rc == VINF_SUCCESS)
9402 {
9403 *pu64Dst = u64Value;
9404 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9405 }
9406 return rc;
9407}
9408
9409
9410#ifdef IEM_WITH_SETJMP
9411/**
9412 * Stores a data qword, longjmp on error.
9413 *
9414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9415 * @param iSegReg The index of the segment register to use for
9416 * this access. The base and limits are checked.
9417 * @param GCPtrMem The address of the guest memory.
9418 * @param u64Value The value to store.
9419 */
9420IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9421{
9422 /* The lazy approach for now... */
9423 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9424 *pu64Dst = u64Value;
9425 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9426}
9427#endif
9428
9429
9430/**
9431 * Stores a data dqword.
9432 *
9433 * @returns Strict VBox status code.
9434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9435 * @param iSegReg The index of the segment register to use for
9436 * this access. The base and limits are checked.
9437 * @param GCPtrMem The address of the guest memory.
9438 * @param u128Value The value to store.
9439 */
9440IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9441{
9442 /* The lazy approach for now... */
9443 uint128_t *pu128Dst;
9444 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9445 if (rc == VINF_SUCCESS)
9446 {
9447 *pu128Dst = u128Value;
9448 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9449 }
9450 return rc;
9451}
9452
9453
9454#ifdef IEM_WITH_SETJMP
9455/**
9456 * Stores a data dqword, longjmp on error.
9457 *
9458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9459 * @param iSegReg The index of the segment register to use for
9460 * this access. The base and limits are checked.
9461 * @param GCPtrMem The address of the guest memory.
9462 * @param u128Value The value to store.
9463 */
9464IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9465{
9466 /* The lazy approach for now... */
9467 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9468 *pu128Dst = u128Value;
9469 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9470}
9471#endif
9472
9473
9474/**
9475 * Stores a data dqword, SSE aligned.
9476 *
9477 * @returns Strict VBox status code.
9478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9479 * @param iSegReg The index of the segment register to use for
9480 * this access. The base and limits are checked.
9481 * @param GCPtrMem The address of the guest memory.
9482 * @param u128Value The value to store.
9483 */
9484IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9485{
9486 /* The lazy approach for now... */
9487 if ( (GCPtrMem & 15)
9488 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9489 return iemRaiseGeneralProtectionFault0(pVCpu);
9490
9491 uint128_t *pu128Dst;
9492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9493 if (rc == VINF_SUCCESS)
9494 {
9495 *pu128Dst = u128Value;
9496 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9497 }
9498 return rc;
9499}
9500
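/*
 * Illustrative note (not part of the original source): the check above raises
 * \#GP(0) for any 16-byte store whose address is not 16-byte aligned, unless
 * the guest has set MXCSR.MM (the misaligned-SSE mode bit, an AMD feature as
 * far as we are aware).  For example:
 *
 *      (0x1008 & 15) == 8   ->  misaligned, \#GP(0) unless MXCSR.MM is set
 *      (0x1010 & 15) == 0   ->  aligned, the store proceeds
 */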
9501
9502#ifdef IEM_WITH_SETJMP
9503/**
9504 * Stores a data dqword, SSE aligned, longjmp on error.
9505 *
9507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9508 * @param iSegReg The index of the segment register to use for
9509 * this access. The base and limits are checked.
9510 * @param GCPtrMem The address of the guest memory.
9511 * @param u128Value The value to store.
9512 */
9513DECL_NO_INLINE(IEM_STATIC, void)
9514iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9515{
9516 /* The lazy approach for now... */
9517 if ( (GCPtrMem & 15) == 0
9518 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9519 {
9520 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9521 *pu128Dst = u128Value;
9522 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9523 return;
9524 }
9525
9526 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9527 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9528}
9529#endif
9530
9531
9532/**
9533 * Stores a descriptor register (sgdt, sidt).
9534 *
9535 * @returns Strict VBox status code.
9536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9537 * @param cbLimit The limit.
9538 * @param GCPtrBase The base address.
9539 * @param iSegReg The index of the segment register to use for
9540 * this access. The base and limits are checked.
9541 * @param GCPtrMem The address of the guest memory.
9542 */
9543IEM_STATIC VBOXSTRICTRC
9544iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9545{
9546 /*
9547 * The SIDT and SGDT instructions actually store the data using two
9548 * independent writes. The instructions do not respond to operand-size prefixes.
9549 */
9550 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9551 if (rcStrict == VINF_SUCCESS)
9552 {
9553 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9554 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9555 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9556 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9557 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9558 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9559 else
9560 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9561 }
9562 return rcStrict;
9563}
9564
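/*
 * Illustrative sketch (not part of the original source): iemMemStoreDataXdtr
 * produces a 2-byte limit immediately followed by the base, so a hypothetical
 * caller inspecting a 32-bit SGDT/SIDT destination buffer could decode it as:
 *
 *      uint8_t  abDst[6];      // filled by the guest-visible store
 *      uint16_t cbLimit = abDst[0] | ((uint16_t)abDst[1] << 8);
 *      uint32_t uBase   = abDst[2]
 *                       | ((uint32_t)abDst[3] << 8)
 *                       | ((uint32_t)abDst[4] << 16)
 *                       | ((uint32_t)abDst[5] << 24);  // top byte forced to 0xff on <= 286
 *
 * The variable names are made up; only the limit+base layout comes from the
 * function above.
 */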
9565
9566/**
9567 * Pushes a word onto the stack.
9568 *
9569 * @returns Strict VBox status code.
9570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9571 * @param u16Value The value to push.
9572 */
9573IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9574{
9575 /* Decrement the stack pointer. */
9576 uint64_t uNewRsp;
9577 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9578 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9579
9580 /* Write the word the lazy way. */
9581 uint16_t *pu16Dst;
9582 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9583 if (rc == VINF_SUCCESS)
9584 {
9585 *pu16Dst = u16Value;
9586 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9587 }
9588
9589 /* Commit the new RSP value unless an access handler made trouble. */
9590 if (rc == VINF_SUCCESS)
9591 pCtx->rsp = uNewRsp;
9592
9593 return rc;
9594}
9595
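/*
 * Illustrative sketch (not part of the original source): a minimal 16-bit PUSH
 * implementation would simply forward any failure status and advance RIP on
 * success, roughly like this (u16Src is a hypothetical operand):
 *
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Src);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;            // \#SS, \#PF or an informational status.
 *      iemRegUpdateRipAndClearRF(pVCpu);
 */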
9596
9597/**
9598 * Pushes a dword onto the stack.
9599 *
9600 * @returns Strict VBox status code.
9601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9602 * @param u32Value The value to push.
9603 */
9604IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9605{
9606 /* Decrement the stack pointer. */
9607 uint64_t uNewRsp;
9608 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9609 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9610
9611 /* Write the dword the lazy way. */
9612 uint32_t *pu32Dst;
9613 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9614 if (rc == VINF_SUCCESS)
9615 {
9616 *pu32Dst = u32Value;
9617 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9618 }
9619
9620 /* Commit the new RSP value unless an access handler made trouble. */
9621 if (rc == VINF_SUCCESS)
9622 pCtx->rsp = uNewRsp;
9623
9624 return rc;
9625}
9626
9627
9628/**
9629 * Pushes a dword segment register value onto the stack.
9630 *
9631 * @returns Strict VBox status code.
9632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9633 * @param u32Value The value to push.
9634 */
9635IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9636{
9637 /* Decrement the stack pointer. */
9638 uint64_t uNewRsp;
9639 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9640 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9641
9642 VBOXSTRICTRC rc;
9643 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9644 {
9645 /* The recompiler writes a full dword. */
9646 uint32_t *pu32Dst;
9647 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9648 if (rc == VINF_SUCCESS)
9649 {
9650 *pu32Dst = u32Value;
9651 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9652 }
9653 }
9654 else
9655 {
9656 /* The Intel docs talk about zero extending the selector register
9657 value. My actual Intel CPU here might be zero extending the value,
9658 but it still only writes the lower word... */
9659 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9660 * happens when crossing an electric page boundary: is the high word checked
9661 * for write accessibility or not? Probably it is. What about segment limits?
9662 * It appears this behavior is also shared with trap error codes.
9663 *
9664 * Docs indicate the behavior changed maybe with the Pentium or Pentium Pro. Check
9665 * on ancient hardware to pin down when it actually changed. */
9666 uint16_t *pu16Dst;
9667 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9668 if (rc == VINF_SUCCESS)
9669 {
9670 *pu16Dst = (uint16_t)u32Value;
9671 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9672 }
9673 }
9674
9675 /* Commit the new RSP value unless an access handler made trouble. */
9676 if (rc == VINF_SUCCESS)
9677 pCtx->rsp = uNewRsp;
9678
9679 return rc;
9680}
9681
9682
9683/**
9684 * Pushes a qword onto the stack.
9685 *
9686 * @returns Strict VBox status code.
9687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9688 * @param u64Value The value to push.
9689 */
9690IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9691{
9692 /* Decrement the stack pointer. */
9693 uint64_t uNewRsp;
9694 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9695 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9696
9697 /* Write the qword the lazy way. */
9698 uint64_t *pu64Dst;
9699 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9700 if (rc == VINF_SUCCESS)
9701 {
9702 *pu64Dst = u64Value;
9703 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9704 }
9705
9706 /* Commit the new RSP value unless an access handler made trouble. */
9707 if (rc == VINF_SUCCESS)
9708 pCtx->rsp = uNewRsp;
9709
9710 return rc;
9711}
9712
9713
9714/**
9715 * Pops a word from the stack.
9716 *
9717 * @returns Strict VBox status code.
9718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9719 * @param pu16Value Where to store the popped value.
9720 */
9721IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9722{
9723 /* Increment the stack pointer. */
9724 uint64_t uNewRsp;
9725 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9726 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9727
9728 /* Read the word the lazy way. */
9729 uint16_t const *pu16Src;
9730 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9731 if (rc == VINF_SUCCESS)
9732 {
9733 *pu16Value = *pu16Src;
9734 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9735
9736 /* Commit the new RSP value. */
9737 if (rc == VINF_SUCCESS)
9738 pCtx->rsp = uNewRsp;
9739 }
9740
9741 return rc;
9742}
9743
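/*
 * Illustrative sketch (not part of the original source): a minimal 16-bit POP
 * into a general register could use the helper above like this (iGReg is a
 * hypothetical destination index):
 *
 *      uint16_t u16Value;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *iemGRegRefU16(pVCpu, iGReg) = u16Value;
 *      iemRegUpdateRipAndClearRF(pVCpu);
 */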
9744
9745/**
9746 * Pops a dword from the stack.
9747 *
9748 * @returns Strict VBox status code.
9749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9750 * @param pu32Value Where to store the popped value.
9751 */
9752IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9753{
9754 /* Increment the stack pointer. */
9755 uint64_t uNewRsp;
9756 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9757 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9758
9759 /* Read the dword the lazy way. */
9760 uint32_t const *pu32Src;
9761 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9762 if (rc == VINF_SUCCESS)
9763 {
9764 *pu32Value = *pu32Src;
9765 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9766
9767 /* Commit the new RSP value. */
9768 if (rc == VINF_SUCCESS)
9769 pCtx->rsp = uNewRsp;
9770 }
9771
9772 return rc;
9773}
9774
9775
9776/**
9777 * Pops a qword from the stack.
9778 *
9779 * @returns Strict VBox status code.
9780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9781 * @param pu64Value Where to store the popped value.
9782 */
9783IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9784{
9785 /* Increment the stack pointer. */
9786 uint64_t uNewRsp;
9787 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9788 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9789
9790 /* Read the qword the lazy way. */
9791 uint64_t const *pu64Src;
9792 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9793 if (rc == VINF_SUCCESS)
9794 {
9795 *pu64Value = *pu64Src;
9796 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9797
9798 /* Commit the new RSP value. */
9799 if (rc == VINF_SUCCESS)
9800 pCtx->rsp = uNewRsp;
9801 }
9802
9803 return rc;
9804}
9805
9806
9807/**
9808 * Pushes a word onto the stack, using a temporary stack pointer.
9809 *
9810 * @returns Strict VBox status code.
9811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9812 * @param u16Value The value to push.
9813 * @param pTmpRsp Pointer to the temporary stack pointer.
9814 */
9815IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9816{
9817 /* Decrement the stack pointer. */
9818 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9819 RTUINT64U NewRsp = *pTmpRsp;
9820 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9821
9822 /* Write the word the lazy way. */
9823 uint16_t *pu16Dst;
9824 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9825 if (rc == VINF_SUCCESS)
9826 {
9827 *pu16Dst = u16Value;
9828 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9829 }
9830
9831 /* Commit the new RSP value unless an access handler made trouble. */
9832 if (rc == VINF_SUCCESS)
9833 *pTmpRsp = NewRsp;
9834
9835 return rc;
9836}
9837
9838
9839/**
9840 * Pushes a dword onto the stack, using a temporary stack pointer.
9841 *
9842 * @returns Strict VBox status code.
9843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9844 * @param u32Value The value to push.
9845 * @param pTmpRsp Pointer to the temporary stack pointer.
9846 */
9847IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9848{
9849 /* Decrement the stack pointer. */
9850 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9851 RTUINT64U NewRsp = *pTmpRsp;
9852 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9853
9854 /* Write the dword the lazy way. */
9855 uint32_t *pu32Dst;
9856 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9857 if (rc == VINF_SUCCESS)
9858 {
9859 *pu32Dst = u32Value;
9860 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9861 }
9862
9863 /* Commit the new RSP value unless an access handler made trouble. */
9864 if (rc == VINF_SUCCESS)
9865 *pTmpRsp = NewRsp;
9866
9867 return rc;
9868}
9869
9870
9871/**
9872 * Pushes a qword onto the stack, using a temporary stack pointer.
9873 *
9874 * @returns Strict VBox status code.
9875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9876 * @param u64Value The value to push.
9877 * @param pTmpRsp Pointer to the temporary stack pointer.
9878 */
9879IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9880{
9881 /* Decrement the stack pointer. */
9882 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9883 RTUINT64U NewRsp = *pTmpRsp;
9884 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9885
9886 /* Write the qword the lazy way. */
9887 uint64_t *pu64Dst;
9888 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9889 if (rc == VINF_SUCCESS)
9890 {
9891 *pu64Dst = u64Value;
9892 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9893 }
9894
9895 /* Commit the new RSP value unless an access handler made trouble. */
9896 if (rc == VINF_SUCCESS)
9897 *pTmpRsp = NewRsp;
9898
9899 return rc;
9900}
9901
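/*
 * Illustrative sketch (not part of the original source): the *Ex variants work
 * on a caller supplied RSP copy, so several pushes can be batched and RSP
 * committed only once everything has succeeded (u16Flags/u16Cs are made up):
 *
 *      RTUINT64U TmpRsp;
 *      TmpRsp.u = IEM_GET_CTX(pVCpu)->rsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, u16Flags, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPushU16Ex(pVCpu, u16Cs, &TmpRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;            // guest RSP is still untouched here.
 *      IEM_GET_CTX(pVCpu)->rsp = TmpRsp.u;
 */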
9902
9903/**
9904 * Pops a word from the stack, using a temporary stack pointer.
9905 *
9906 * @returns Strict VBox status code.
9907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9908 * @param pu16Value Where to store the popped value.
9909 * @param pTmpRsp Pointer to the temporary stack pointer.
9910 */
9911IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9912{
9913 /* Increment the stack pointer. */
9914 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9915 RTUINT64U NewRsp = *pTmpRsp;
9916 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9917
9918 /* Read the word the lazy way. */
9919 uint16_t const *pu16Src;
9920 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9921 if (rc == VINF_SUCCESS)
9922 {
9923 *pu16Value = *pu16Src;
9924 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9925
9926 /* Commit the new RSP value. */
9927 if (rc == VINF_SUCCESS)
9928 *pTmpRsp = NewRsp;
9929 }
9930
9931 return rc;
9932}
9933
9934
9935/**
9936 * Pops a dword from the stack, using a temporary stack pointer.
9937 *
9938 * @returns Strict VBox status code.
9939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9940 * @param pu32Value Where to store the popped value.
9941 * @param pTmpRsp Pointer to the temporary stack pointer.
9942 */
9943IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9944{
9945 /* Increment the stack pointer. */
9946 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9947 RTUINT64U NewRsp = *pTmpRsp;
9948 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9949
9950 /* Read the dword the lazy way. */
9951 uint32_t const *pu32Src;
9952 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9953 if (rc == VINF_SUCCESS)
9954 {
9955 *pu32Value = *pu32Src;
9956 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9957
9958 /* Commit the new RSP value. */
9959 if (rc == VINF_SUCCESS)
9960 *pTmpRsp = NewRsp;
9961 }
9962
9963 return rc;
9964}
9965
9966
9967/**
9968 * Pops a qword from the stack, using a temporary stack pointer.
9969 *
9970 * @returns Strict VBox status code.
9971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9972 * @param pu64Value Where to store the popped value.
9973 * @param pTmpRsp Pointer to the temporary stack pointer.
9974 */
9975IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9976{
9977 /* Increment the stack pointer. */
9978 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9979 RTUINT64U NewRsp = *pTmpRsp;
9980 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9981
9982 /* Read the qword the lazy way. */
9983 uint64_t const *pu64Src;
9984 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9985 if (rcStrict == VINF_SUCCESS)
9986 {
9987 *pu64Value = *pu64Src;
9988 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9989
9990 /* Commit the new RSP value. */
9991 if (rcStrict == VINF_SUCCESS)
9992 *pTmpRsp = NewRsp;
9993 }
9994
9995 return rcStrict;
9996}
9997
9998
9999/**
10000 * Begin a special stack push (used by interrupts, exceptions and such).
10001 *
10002 * This will raise \#SS or \#PF if appropriate.
10003 *
10004 * @returns Strict VBox status code.
10005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10006 * @param cbMem The number of bytes to push onto the stack.
10007 * @param ppvMem Where to return the pointer to the stack memory.
10008 * As with the other memory functions this could be
10009 * direct access or bounce buffered access, so
10010 * don't commit any registers until the commit call
10011 * succeeds.
10012 * @param puNewRsp Where to return the new RSP value. This must be
10013 * passed unchanged to
10014 * iemMemStackPushCommitSpecial().
10015 */
10016IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10017{
10018 Assert(cbMem < UINT8_MAX);
10019 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10020 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10021 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10022}
10023
10024
10025/**
10026 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10027 *
10028 * This will update the rSP.
10029 *
10030 * @returns Strict VBox status code.
10031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10032 * @param pvMem The pointer returned by
10033 * iemMemStackPushBeginSpecial().
10034 * @param uNewRsp The new RSP value returned by
10035 * iemMemStackPushBeginSpecial().
10036 */
10037IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10038{
10039 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10040 if (rcStrict == VINF_SUCCESS)
10041 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10042 return rcStrict;
10043}
10044
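/*
 * Illustrative sketch (not part of the original source): the special push API
 * is used as a begin/commit pair, e.g. for a hypothetical 8-byte exception
 * frame (uEip/uErr are made-up values):
 *
 *      uint64_t  uNewRsp;
 *      uint32_t *pau32Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pau32Frame[0] = uEip;
 *      pau32Frame[1] = uErr;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
 *
 * RSP is only written by the commit call, so a failing access handler leaves
 * the guest stack pointer untouched.
 */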
10045
10046/**
10047 * Begin a special stack pop (used by iret, retf and such).
10048 *
10049 * This will raise \#SS or \#PF if appropriate.
10050 *
10051 * @returns Strict VBox status code.
10052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10053 * @param cbMem The number of bytes to pop from the stack.
10054 * @param ppvMem Where to return the pointer to the stack memory.
10055 * @param puNewRsp Where to return the new RSP value. This must be
10056 * assigned to CPUMCTX::rsp manually some time
10057 * after iemMemStackPopDoneSpecial() has been
10058 * called.
10059 */
10060IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10061{
10062 Assert(cbMem < UINT8_MAX);
10063 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10064 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10065 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10066}
10067
10068
10069/**
10070 * Continue a special stack pop (used by iret and retf).
10071 *
10072 * This will raise \#SS or \#PF if appropriate.
10073 *
10074 * @returns Strict VBox status code.
10075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10076 * @param cbMem The number of bytes to pop from the stack.
10077 * @param ppvMem Where to return the pointer to the stack memory.
10078 * @param puNewRsp Where to return the new RSP value. This must be
10079 * assigned to CPUMCTX::rsp manually some time
10080 * after iemMemStackPopDoneSpecial() has been
10081 * called.
10082 */
10083IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10084{
10085 Assert(cbMem < UINT8_MAX);
10086 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10087 RTUINT64U NewRsp;
10088 NewRsp.u = *puNewRsp;
10089 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10090 *puNewRsp = NewRsp.u;
10091 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10092}
10093
10094
10095/**
10096 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10097 * iemMemStackPopContinueSpecial).
10098 *
10099 * The caller will manually commit the rSP.
10100 *
10101 * @returns Strict VBox status code.
10102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10103 * @param pvMem The pointer returned by
10104 * iemMemStackPopBeginSpecial() or
10105 * iemMemStackPopContinueSpecial().
10106 */
10107IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10108{
10109 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10110}
10111
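/*
 * Illustrative sketch (not part of the original source): IRET/RETF style code
 * uses the special pop API like this (the 12-byte frame and field names are
 * made up for the example):
 *
 *      uint64_t        uNewRsp;
 *      uint32_t const *pau32Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, (void const **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t const uEip = pau32Frame[0];
 *      uint16_t const uCs  = (uint16_t)pau32Frame[1];
 *      uint32_t const fEfl = pau32Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  // committed manually, as documented above.
 */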
10112
10113/**
10114 * Fetches a system table byte.
10115 *
10116 * @returns Strict VBox status code.
10117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10118 * @param pbDst Where to return the byte.
10119 * @param iSegReg The index of the segment register to use for
10120 * this access. The base and limits are checked.
10121 * @param GCPtrMem The address of the guest memory.
10122 */
10123IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10124{
10125 /* The lazy approach for now... */
10126 uint8_t const *pbSrc;
10127 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10128 if (rc == VINF_SUCCESS)
10129 {
10130 *pbDst = *pbSrc;
10131 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10132 }
10133 return rc;
10134}
10135
10136
10137/**
10138 * Fetches a system table word.
10139 *
10140 * @returns Strict VBox status code.
10141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10142 * @param pu16Dst Where to return the word.
10143 * @param iSegReg The index of the segment register to use for
10144 * this access. The base and limits are checked.
10145 * @param GCPtrMem The address of the guest memory.
10146 */
10147IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10148{
10149 /* The lazy approach for now... */
10150 uint16_t const *pu16Src;
10151 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10152 if (rc == VINF_SUCCESS)
10153 {
10154 *pu16Dst = *pu16Src;
10155 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10156 }
10157 return rc;
10158}
10159
10160
10161/**
10162 * Fetches a system table dword.
10163 *
10164 * @returns Strict VBox status code.
10165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10166 * @param pu32Dst Where to return the dword.
10167 * @param iSegReg The index of the segment register to use for
10168 * this access. The base and limits are checked.
10169 * @param GCPtrMem The address of the guest memory.
10170 */
10171IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10172{
10173 /* The lazy approach for now... */
10174 uint32_t const *pu32Src;
10175 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10176 if (rc == VINF_SUCCESS)
10177 {
10178 *pu32Dst = *pu32Src;
10179 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10180 }
10181 return rc;
10182}
10183
10184
10185/**
10186 * Fetches a system table qword.
10187 *
10188 * @returns Strict VBox status code.
10189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10190 * @param pu64Dst Where to return the qword.
10191 * @param iSegReg The index of the segment register to use for
10192 * this access. The base and limits are checked.
10193 * @param GCPtrMem The address of the guest memory.
10194 */
10195IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10196{
10197 /* The lazy approach for now... */
10198 uint64_t const *pu64Src;
10199 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10200 if (rc == VINF_SUCCESS)
10201 {
10202 *pu64Dst = *pu64Src;
10203 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10204 }
10205 return rc;
10206}
10207
10208
10209/**
10210 * Fetches a descriptor table entry with caller specified error code.
10211 *
10212 * @returns Strict VBox status code.
10213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10214 * @param pDesc Where to return the descriptor table entry.
10215 * @param uSel The selector which table entry to fetch.
10216 * @param uXcpt The exception to raise on table lookup error.
10217 * @param uErrorCode The error code associated with the exception.
10218 */
10219IEM_STATIC VBOXSTRICTRC
10220iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10221{
10222 AssertPtr(pDesc);
10223 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10224
10225 /** @todo did the 286 require all 8 bytes to be accessible? */
10226 /*
10227 * Get the selector table base and check bounds.
10228 */
10229 RTGCPTR GCPtrBase;
10230 if (uSel & X86_SEL_LDT)
10231 {
10232 if ( !pCtx->ldtr.Attr.n.u1Present
10233 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10234 {
10235 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10236 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10237 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10238 uErrorCode, 0);
10239 }
10240
10241 Assert(pCtx->ldtr.Attr.n.u1Present);
10242 GCPtrBase = pCtx->ldtr.u64Base;
10243 }
10244 else
10245 {
10246 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10247 {
10248 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10249 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10250 uErrorCode, 0);
10251 }
10252 GCPtrBase = pCtx->gdtr.pGdt;
10253 }
10254
10255 /*
10256 * Read the legacy descriptor and maybe the long mode extensions if
10257 * required.
10258 */
10259 VBOXSTRICTRC rcStrict;
10260 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10261 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10262 else
10263 {
10264 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10265 if (rcStrict != VINF_SUCCESS)
10266 return rcStrict;
10267 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10268 if (rcStrict != VINF_SUCCESS)
10269 return rcStrict;
10270 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10271 }
10272
10273 if (rcStrict == VINF_SUCCESS)
10274 {
10275 if ( !IEM_IS_LONG_MODE(pVCpu)
10276 || pDesc->Legacy.Gen.u1DescType)
10277 pDesc->Long.au64[1] = 0;
10278 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10279 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10280 else
10281 {
10282 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10283 /** @todo is this the right exception? */
10284 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10285 }
10286 }
10287 return rcStrict;
10288}
10289
10290
10291/**
10292 * Fetches a descriptor table entry.
10293 *
10294 * @returns Strict VBox status code.
10295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10296 * @param pDesc Where to return the descriptor table entry.
10297 * @param uSel The selector which table entry to fetch.
10298 * @param uXcpt The exception to raise on table lookup error.
10299 */
10300IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10301{
10302 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10303}
10304
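/*
 * Illustrative sketch (not part of the original source): callers typically
 * fetch the descriptor and then validate it before use, along these lines
 * (the not-present helper name below is hypothetical):
 *
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)
 *          return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
 */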
10305
10306/**
10307 * Fakes a long mode stack selector for SS = 0.
10308 *
10309 * @param pDescSs Where to return the fake stack descriptor.
10310 * @param uDpl The DPL we want.
10311 */
10312IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10313{
10314 pDescSs->Long.au64[0] = 0;
10315 pDescSs->Long.au64[1] = 0;
10316 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10317 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10318 pDescSs->Long.Gen.u2Dpl = uDpl;
10319 pDescSs->Long.Gen.u1Present = 1;
10320 pDescSs->Long.Gen.u1Long = 1;
10321}
10322
10323
10324/**
10325 * Marks the selector descriptor as accessed (only non-system descriptors).
10326 *
10327 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10328 * will therefore skip the limit checks.
10329 *
10330 * @returns Strict VBox status code.
10331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10332 * @param uSel The selector.
10333 */
10334IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10335{
10336 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10337
10338 /*
10339 * Get the selector table base and calculate the entry address.
10340 */
10341 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10342 ? pCtx->ldtr.u64Base
10343 : pCtx->gdtr.pGdt;
10344 GCPtr += uSel & X86_SEL_MASK;
10345
10346 /*
10347 * ASMAtomicBitSet will assert if the address is misaligned, so we do some
10348 * ugly stuff to avoid that. This also makes sure the access is atomic
10349 * and more or less removes any question about 8-bit vs 32-bit accesses.
10350 */
10351 VBOXSTRICTRC rcStrict;
10352 uint32_t volatile *pu32;
10353 if ((GCPtr & 3) == 0)
10354 {
10355 /* The normal case, map the 32-bit bits around the accessed bit (40). */
10356 GCPtr += 2 + 2;
10357 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10358 if (rcStrict != VINF_SUCCESS)
10359 return rcStrict;
10360 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10361 }
10362 else
10363 {
10364 /* The misaligned GDT/LDT case, map the whole thing. */
10365 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10366 if (rcStrict != VINF_SUCCESS)
10367 return rcStrict;
10368 switch ((uintptr_t)pu32 & 3)
10369 {
10370 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10371 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10372 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10373 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10374 }
10375 }
10376
10377 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10378}
10379
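/*
 * Illustrative note (not part of the original source): the accessed bit is
 * bit 40 of the 8-byte descriptor, i.e. bit 0 of the type field in byte 5.
 * In the aligned case above the mapping starts at byte 4, which is why the
 * bit index becomes 40 - 32 = 8 for ASMAtomicBitSet(pu32, 8); the misaligned
 * case maps all 8 bytes and re-derives the index from the pointer alignment.
 */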
10380/** @} */
10381
10382
10383/*
10384 * Include the C/C++ implementation of instruction.
10385 */
10386#include "IEMAllCImpl.cpp.h"
10387
10388
10389
10390/** @name "Microcode" macros.
10391 *
10392 * The idea is that we should be able to use the same code both to interpret
10393 * instructions and to feed a recompiler. Thus this obfuscation.
10394 *
10395 * @{
10396 */
10397#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10398#define IEM_MC_END() }
10399#define IEM_MC_PAUSE() do {} while (0)
10400#define IEM_MC_CONTINUE() do {} while (0)
10401
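/*
 * Illustrative sketch (not part of the original source): an instruction body
 * built from these macros typically looks like the following hypothetical
 * register-to-register 16-bit move (iGRegDst/iGRegSrc would come from the
 * ModR/M decoding, which is omitted here):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
 *      IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * The same body can thus be expanded either into direct interpretation code
 * or, eventually, into recompiler input.
 */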
10402/** Internal macro. */
10403#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10404 do \
10405 { \
10406 VBOXSTRICTRC rcStrict2 = a_Expr; \
10407 if (rcStrict2 != VINF_SUCCESS) \
10408 return rcStrict2; \
10409 } while (0)
10410
10411
10412#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10413#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10414#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10415#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10416#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10417#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10418#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10419#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10420#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10421 do { \
10422 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10423 return iemRaiseDeviceNotAvailable(pVCpu); \
10424 } while (0)
10425#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10426 do { \
10427 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10428 return iemRaiseMathFault(pVCpu); \
10429 } while (0)
10430#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10431 do { \
10432 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10433 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10434 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10435 return iemRaiseUndefinedOpcode(pVCpu); \
10436 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10437 return iemRaiseDeviceNotAvailable(pVCpu); \
10438 } while (0)
10439#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10440 do { \
10441 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10442 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10443 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10444 return iemRaiseUndefinedOpcode(pVCpu); \
10445 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10446 return iemRaiseDeviceNotAvailable(pVCpu); \
10447 } while (0)
10448#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10449 do { \
10450 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10451 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10452 return iemRaiseUndefinedOpcode(pVCpu); \
10453 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10454 return iemRaiseDeviceNotAvailable(pVCpu); \
10455 } while (0)
10456#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10457 do { \
10458 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10459 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10460 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10461 return iemRaiseUndefinedOpcode(pVCpu); \
10462 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10463 return iemRaiseDeviceNotAvailable(pVCpu); \
10464 } while (0)
10465#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10466 do { \
10467 if (pVCpu->iem.s.uCpl != 0) \
10468 return iemRaiseGeneralProtectionFault0(pVCpu); \
10469 } while (0)
10470
10471
10472#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10473#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10474#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10475#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10476#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10477#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10478#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10479 uint32_t a_Name; \
10480 uint32_t *a_pName = &a_Name
10481#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10482 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10483
10484#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10485#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10486
10487#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10488#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10489#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10490#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10491#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10492#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10493#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10494#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10495#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10496#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10497#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10498#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10499#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10500#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10501#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10502#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10503#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10504#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10505#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10506#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10507#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10508#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10509#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10510#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10511#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10512#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10513#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10514#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10515#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10516/** @note Not for IOPL or IF testing or modification. */
10517#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10518#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10519#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10520#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10521
10522#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10523#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10524#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10525#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10526#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10527#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10528#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10529#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10530#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10531#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10532#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10533 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10534
10535#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10536#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10537/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10538 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10539#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10540#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10541/** @note Not for IOPL or IF testing or modification. */
10542#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10543
10544#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10545#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10546#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10547 do { \
10548 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10549 *pu32Reg += (a_u32Value); \
10550 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10551 } while (0)
10552#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10553
10554#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10555#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10556#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10557 do { \
10558 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10559 *pu32Reg -= (a_u32Value); \
10560 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10561 } while (0)
10562#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10563#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10564
10565#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10566#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10567#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10568#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10569#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10570#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10571#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10572
10573#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10574#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10575#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10576#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10577
10578#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10579#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10580#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10581
10582#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10583#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10584#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10585
10586#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10587#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10588#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10589
10590#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10591#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10592#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10593
10594#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10595
10596#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10597
10598#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10599#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10600#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10601 do { \
10602 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10603 *pu32Reg &= (a_u32Value); \
10604 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10605 } while (0)
10606#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10607
10608#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10609#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10610#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10611 do { \
10612 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10613 *pu32Reg |= (a_u32Value); \
10614 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10615 } while (0)
10616#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10617
10618
10619/** @note Not for IOPL or IF modification. */
10620#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10621/** @note Not for IOPL or IF modification. */
10622#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10623/** @note Not for IOPL or IF modification. */
10624#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10625
10626#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10627
10628
10629#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10630 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10631#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10632 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10633#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10634 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10635#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10636 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10637#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10638 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10639#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10640 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10641#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10642 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10643
10644#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10645 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10646#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10647 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10648#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10649 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10650#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10651 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10652#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10653 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10654#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10655 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10656 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10657 } while (0)
10658#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10659 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10660 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10661 } while (0)
10662#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10663 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10664#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10665 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10666#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10667 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10668#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10669 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10670 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10671
10672#ifndef IEM_WITH_SETJMP
10673# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10675# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10677# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10678 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10679#else
10680# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10681 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10682# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10683 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10684# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10685 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10686#endif
10687
10688#ifndef IEM_WITH_SETJMP
10689# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10691# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10693# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10695#else
10696# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10697 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10698# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10699 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10700# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10701 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10702#endif
10703
10704#ifndef IEM_WITH_SETJMP
10705# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10707# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10709# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10711#else
10712# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10713 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10714# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10715 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10716# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10717 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10718#endif
10719
10720#ifdef SOME_UNUSED_FUNCTION
10721# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10723#endif
10724
10725#ifndef IEM_WITH_SETJMP
10726# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10728# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10730# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10732# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10734#else
10735# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10736 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10737# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10738 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10739# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10740 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10741# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10742 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10743#endif
10744
10745#ifndef IEM_WITH_SETJMP
10746# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10748# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10749 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10750# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10751 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10752#else
10753# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10754 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10755# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10756 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10757# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10758 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10759#endif
10760
10761#ifndef IEM_WITH_SETJMP
10762# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10764# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10766#else
10767# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10768 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10769# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10770 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10771#endif
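/*
 * Illustrative sketch (not part of the build): an instruction body typically
 * combines IEM_MC_CALC_RM_EFF_ADDR (defined further down) with one of the
 * IEM_MC_FETCH_MEM_* wrappers above.  The IEM_MC_BEGIN, IEM_MC_LOCAL,
 * IEM_MC_STORE_GREG_U32 and IEM_MC_ADVANCE_RIP helpers are assumed from the
 * rest of the IEM sources, and iGRegDst stands in for whatever destination
 * register the decoder picked out of the ModR/M byte.
 *
 * @code
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_FETCH_MEM_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U32(iGRegDst, u32Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 *
 * In the !IEM_WITH_SETJMP configuration the fetch expands to a status code
 * check that may return from the instruction body; in the setjmp configuration
 * it is a plain assignment and failures are reported via longjmp.
 */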
10772
10773
10774
10775#ifndef IEM_WITH_SETJMP
10776# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10777 do { \
10778 uint8_t u8Tmp; \
10779 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10780 (a_u16Dst) = u8Tmp; \
10781 } while (0)
10782# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10783 do { \
10784 uint8_t u8Tmp; \
10785 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10786 (a_u32Dst) = u8Tmp; \
10787 } while (0)
10788# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10789 do { \
10790 uint8_t u8Tmp; \
10791 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10792 (a_u64Dst) = u8Tmp; \
10793 } while (0)
10794# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10795 do { \
10796 uint16_t u16Tmp; \
10797 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10798 (a_u32Dst) = u16Tmp; \
10799 } while (0)
10800# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10801 do { \
10802 uint16_t u16Tmp; \
10803 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10804 (a_u64Dst) = u16Tmp; \
10805 } while (0)
10806# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10807 do { \
10808 uint32_t u32Tmp; \
10809 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10810 (a_u64Dst) = u32Tmp; \
10811 } while (0)
10812#else /* IEM_WITH_SETJMP */
10813# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10814 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10815# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10816 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10817# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10818 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10819# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10820 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10821# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10822 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10823# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10824 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10825#endif /* IEM_WITH_SETJMP */
10826
10827#ifndef IEM_WITH_SETJMP
10828# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10829 do { \
10830 uint8_t u8Tmp; \
10831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10832 (a_u16Dst) = (int8_t)u8Tmp; \
10833 } while (0)
10834# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10835 do { \
10836 uint8_t u8Tmp; \
10837 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10838 (a_u32Dst) = (int8_t)u8Tmp; \
10839 } while (0)
10840# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10841 do { \
10842 uint8_t u8Tmp; \
10843 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10844 (a_u64Dst) = (int8_t)u8Tmp; \
10845 } while (0)
10846# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10847 do { \
10848 uint16_t u16Tmp; \
10849 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10850 (a_u32Dst) = (int16_t)u16Tmp; \
10851 } while (0)
10852# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10853 do { \
10854 uint16_t u16Tmp; \
10855 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10856 (a_u64Dst) = (int16_t)u16Tmp; \
10857 } while (0)
10858# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10859 do { \
10860 uint32_t u32Tmp; \
10861 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10862 (a_u64Dst) = (int32_t)u32Tmp; \
10863 } while (0)
10864#else /* IEM_WITH_SETJMP */
10865# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10866 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10867# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10868 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10869# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10870 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10871# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10872 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10873# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10874 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10875# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10876 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10877#endif /* IEM_WITH_SETJMP */
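/*
 * Worked example (informational): fetching the byte 0xFE with
 * IEM_MC_FETCH_MEM_U8_ZX_U32 yields 0x000000FE, while the sign-extending
 * IEM_MC_FETCH_MEM_U8_SX_U32 casts through int8_t and yields 0xFFFFFFFE.
 * The 16->32/64 and 32->64 bit variants follow the same pattern, matching
 * MOVZX/MOVSX style operand handling.
 */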
10878
10879#ifndef IEM_WITH_SETJMP
10880# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10881 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10882# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10883 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10884# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10885 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10886# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10887 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10888#else
10889# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10890 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10891# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10892 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10893# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10894 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10895# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10896 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10897#endif
10898
10899#ifndef IEM_WITH_SETJMP
10900# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10901 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10902# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10903 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10904# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10905 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10906# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10907 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10908#else
10909# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10910 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10911# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10912 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10913# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10914 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10915# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10916 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10917#endif
10918
10919#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10920#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10921#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10922#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10923#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10924#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10925#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10926 do { \
10927 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10928 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10929 } while (0)
10930
10931#ifndef IEM_WITH_SETJMP
10932# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10933 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10934# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10935 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10936#else
10937# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10938 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10939# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10940 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10941#endif
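/*
 * Illustrative sketch (not part of the build): a store-to-memory form pairs
 * IEM_MC_CALC_RM_EFF_ADDR with one of the IEM_MC_STORE_MEM_* wrappers above.
 * IEM_MC_FETCH_GREG_U16 is assumed from the register access macros earlier in
 * the file and iGRegSrc stands in for the source register index the decoder
 * extracted from the ModR/M byte.
 *
 * @code
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
 *      IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */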
10942
10943
10944#define IEM_MC_PUSH_U16(a_u16Value) \
10945 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10946#define IEM_MC_PUSH_U32(a_u32Value) \
10947 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10948#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10949 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10950#define IEM_MC_PUSH_U64(a_u64Value) \
10951 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10952
10953#define IEM_MC_POP_U16(a_pu16Value) \
10954 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10955#define IEM_MC_POP_U32(a_pu32Value) \
10956 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10957#define IEM_MC_POP_U64(a_pu64Value) \
10958 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10959
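/*
 * Illustrative sketch (not part of the build): a PUSH r16 style body fetches
 * the general purpose register and hands the value to the stack helper; the
 * pop macros mirror this and take a pointer to the destination instead.
 * iGReg is a placeholder for the register index picked by the decoder.
 *
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iGReg);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */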
10960/** Maps guest memory for direct or bounce buffered access.
10961 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10962 * @remarks May return.
10963 */
10964#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10965 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10966
10967/** Maps guest memory for direct or bounce buffered access.
10968 * The purpose is to pass it to an operand implementation, thus the a_iArg.
10969 * @remarks May return.
10970 */
10971#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10972 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10973
10974/** Commits the memory and unmaps the guest memory.
10975 * @remarks May return.
10976 */
10977#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10978 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
10979
10980/** Commits the memory and unmaps the guest memory unless the FPU status word
10981 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
10982 * would prevent the FPU store instruction from storing its result.
10983 *
10984 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10985 * store, while \#P will not.
10986 *
10987 * @remarks May in theory return - for now.
10988 */
10989#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10990 do { \
10991 if ( !(a_u16FSW & X86_FSW_ES) \
10992 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
10993 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
10994 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
10995 } while (0)
10996
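/*
 * Illustrative sketch (not part of the build): the map/commit pair is meant
 * for read-modify-write memory operands.  The mapped pointer is handed to an
 * assembly worker and the commit writes the result back and releases the
 * mapping.  IEM_ACCESS_DATA_RW comes from the internal access flags, while
 * pfnWorkerU16 and iGRegSrc are placeholders for the worker table entry and
 * the decoded register index.
 *
 * @code
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint16_t *,        pu16Dst,          0);
 *      IEM_MC_ARG(uint16_t,          u16Src,           1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(      pEFlags, EFlags,  2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnWorkerU16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */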
10997/** Calculate efficient address from R/M. */
10998#ifndef IEM_WITH_SETJMP
10999# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11000 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11001#else
11002# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11003 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11004#endif
11005
11006#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11007#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11008#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11009#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11010#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11011#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11012#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11013
11014/**
11015 * Defers the rest of the instruction emulation to a C implementation routine
11016 * and returns, only taking the standard parameters.
11017 *
11018 * @param a_pfnCImpl The pointer to the C routine.
11019 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11020 */
11021#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11022
11023/**
11024 * Defers the rest of instruction emulation to a C implementation routine and
11025 * returns, taking one argument in addition to the standard ones.
11026 *
11027 * @param a_pfnCImpl The pointer to the C routine.
11028 * @param a0 The argument.
11029 */
11030#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11031
11032/**
11033 * Defers the rest of the instruction emulation to a C implementation routine
11034 * and returns, taking two arguments in addition to the standard ones.
11035 *
11036 * @param a_pfnCImpl The pointer to the C routine.
11037 * @param a0 The first extra argument.
11038 * @param a1 The second extra argument.
11039 */
11040#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11041
11042/**
11043 * Defers the rest of the instruction emulation to a C implementation routine
11044 * and returns, taking three arguments in addition to the standard ones.
11045 *
11046 * @param a_pfnCImpl The pointer to the C routine.
11047 * @param a0 The first extra argument.
11048 * @param a1 The second extra argument.
11049 * @param a2 The third extra argument.
11050 */
11051#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11052
11053/**
11054 * Defers the rest of the instruction emulation to a C implementation routine
11055 * and returns, taking four arguments in addition to the standard ones.
11056 *
11057 * @param a_pfnCImpl The pointer to the C routine.
11058 * @param a0 The first extra argument.
11059 * @param a1 The second extra argument.
11060 * @param a2 The third extra argument.
11061 * @param a3 The fourth extra argument.
11062 */
11063#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11064
11065/**
11066 * Defers the rest of the instruction emulation to a C implementation routine
11067 * and returns, taking five arguments in addition to the standard ones.
11068 *
11069 * @param a_pfnCImpl The pointer to the C routine.
11070 * @param a0 The first extra argument.
11071 * @param a1 The second extra argument.
11072 * @param a2 The third extra argument.
11073 * @param a3 The fourth extra argument.
11074 * @param a4 The fifth extra argument.
11075 */
11076#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11077
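/*
 * Note (informational): the IEM_MC_CALL_CIMPL_n macros return from the
 * instruction body with whatever strict status the C routine produces, so
 * nothing placed after them executes.  A hedged sketch with a placeholder
 * worker name:
 *
 * @code
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint8_t, iEffSeg,     0);
 *      IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, iEffSeg, GCPtrEffSrc);
 *      IEM_MC_END();
 * @endcode
 */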
11078/**
11079 * Defers the entire instruction emulation to a C implementation routine and
11080 * returns, only taking the standard parameters.
11081 *
11082 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11083 *
11084 * @param a_pfnCImpl The pointer to the C routine.
11085 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11086 */
11087#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11088
11089/**
11090 * Defers the entire instruction emulation to a C implementation routine and
11091 * returns, taking one argument in addition to the standard ones.
11092 *
11093 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11094 *
11095 * @param a_pfnCImpl The pointer to the C routine.
11096 * @param a0 The argument.
11097 */
11098#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11099
11100/**
11101 * Defers the entire instruction emulation to a C implementation routine and
11102 * returns, taking two arguments in addition to the standard ones.
11103 *
11104 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11105 *
11106 * @param a_pfnCImpl The pointer to the C routine.
11107 * @param a0 The first extra argument.
11108 * @param a1 The second extra argument.
11109 */
11110#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11111
11112/**
11113 * Defers the entire instruction emulation to a C implementation routine and
11114 * returns, taking three arguments in addition to the standard ones.
11115 *
11116 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11117 *
11118 * @param a_pfnCImpl The pointer to the C routine.
11119 * @param a0 The first extra argument.
11120 * @param a1 The second extra argument.
11121 * @param a2 The third extra argument.
11122 */
11123#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11124
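/*
 * Note (informational): unlike IEM_MC_CALL_CIMPL_n, these deferral macros are
 * for instructions whose emulation lives entirely in a C worker, so the
 * decoder body is little more than the deferral itself.  Hedged sketch with
 * placeholder names:
 *
 * @code
 *      FNIEMOP_DEF(iemOp_SomeInsn)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeWorker);
 *      }
 * @endcode
 */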
11125/**
11126 * Calls a FPU assembly implementation taking one visible argument.
11127 *
11128 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11129 * @param a0 The first extra argument.
11130 */
11131#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11132 do { \
11133 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11134 } while (0)
11135
11136/**
11137 * Calls a FPU assembly implementation taking two visible arguments.
11138 *
11139 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11140 * @param a0 The first extra argument.
11141 * @param a1 The second extra argument.
11142 */
11143#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11144 do { \
11145 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11146 } while (0)
11147
11148/**
11149 * Calls a FPU assembly implementation taking three visible arguments.
11150 *
11151 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11152 * @param a0 The first extra argument.
11153 * @param a1 The second extra argument.
11154 * @param a2 The third extra argument.
11155 */
11156#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11157 do { \
11158 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11159 } while (0)
11160
11161#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11162 do { \
11163 (a_FpuData).FSW = (a_FSW); \
11164 (a_FpuData).r80Result = *(a_pr80Value); \
11165 } while (0)
11166
11167/** Pushes FPU result onto the stack. */
11168#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11169 iemFpuPushResult(pVCpu, &a_FpuData)
11170/** Pushes FPU result onto the stack and sets the FPUDP. */
11171#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11172 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11173
11174/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11175#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11176 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11177
11178/** Stores FPU result in a stack register. */
11179#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11180 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11181/** Stores FPU result in a stack register and pops the stack. */
11182#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11183 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11184/** Stores FPU result in a stack register and sets the FPUDP. */
11185#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11186 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11187/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11188 * stack. */
11189#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11190 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11191
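/*
 * Illustrative sketch (not part of the build): the usual FPU flow lets the
 * assembly worker fill an IEMFPURESULT and then hands it to one of the
 * push/store macros above, which take care of FSW/TOP updates and masked
 * exceptions.  pfnWorker is a placeholder for the worker; the surrounding
 * IEM_MC_BEGIN/IEM_MC_END and exception pre-checks are omitted.
 *
 * @code
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,       1);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
 *          IEM_MC_CALL_FPU_AIMPL_2(pfnWorker, pFpuRes, pr80Value);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 * @endcode
 */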
11192/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11193#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11194 iemFpuUpdateOpcodeAndIp(pVCpu)
11195/** Free a stack register (for FFREE and FFREEP). */
11196#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11197 iemFpuStackFree(pVCpu, a_iStReg)
11198/** Increment the FPU stack pointer. */
11199#define IEM_MC_FPU_STACK_INC_TOP() \
11200 iemFpuStackIncTop(pVCpu)
11201/** Decrement the FPU stack pointer. */
11202#define IEM_MC_FPU_STACK_DEC_TOP() \
11203 iemFpuStackDecTop(pVCpu)
11204
11205/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11206#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11207 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11208/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11209#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11210 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11211/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11212#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11213 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11214/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11215#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11216 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11217/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11218 * stack. */
11219#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11220 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11221/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11222#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11223 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11224
11225/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11226#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11227 iemFpuStackUnderflow(pVCpu, a_iStDst)
11228/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11229 * stack. */
11230#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11231 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11232/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11233 * FPUDS. */
11234#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11235 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11236/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11237 * FPUDS. Pops stack. */
11238#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11239 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11240/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11241 * stack twice. */
11242#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11243 iemFpuStackUnderflowThenPopPop(pVCpu)
11244/** Raises a FPU stack underflow exception for an instruction pushing a result
11245 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11246#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11247 iemFpuStackPushUnderflow(pVCpu)
11248/** Raises a FPU stack underflow exception for an instruction pushing a result
11249 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11250#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11251 iemFpuStackPushUnderflowTwo(pVCpu)
11252
11253/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11254 * FPUIP, FPUCS and FOP. */
11255#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11256 iemFpuStackPushOverflow(pVCpu)
11257/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11258 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11259#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11260 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11261/** Prepares for using the FPU state.
11262 * Ensures that we can use the host FPU in the current context (RC+R0).
11263 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11264#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11265/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
11266#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11267/** Actualizes the guest FPU state so it can be accessed and modified. */
11268#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11269
11270/** Prepares for using the SSE state.
11271 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11272 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11273#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11274/** Actualizes the guest XMM0..15 register state for read-only access. */
11275#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11276/** Actualizes the guest XMM0..15 register state for read-write access. */
11277#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11278
11279/**
11280 * Calls a MMX assembly implementation taking two visible arguments.
11281 *
11282 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11283 * @param a0 The first extra argument.
11284 * @param a1 The second extra argument.
11285 */
11286#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11287 do { \
11288 IEM_MC_PREPARE_FPU_USAGE(); \
11289 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11290 } while (0)
11291
11292/**
11293 * Calls a MMX assembly implementation taking three visible arguments.
11294 *
11295 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11296 * @param a0 The first extra argument.
11297 * @param a1 The second extra argument.
11298 * @param a2 The third extra argument.
11299 */
11300#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11301 do { \
11302 IEM_MC_PREPARE_FPU_USAGE(); \
11303 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11304 } while (0)
11305
11306
11307/**
11308 * Calls a SSE assembly implementation taking two visible arguments.
11309 *
11310 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
11311 * @param a0 The first extra argument.
11312 * @param a1 The second extra argument.
11313 */
11314#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11315 do { \
11316 IEM_MC_PREPARE_SSE_USAGE(); \
11317 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11318 } while (0)
11319
11320/**
11321 * Calls a SSE assembly implementation taking three visible arguments.
11322 *
11323 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
11324 * @param a0 The first extra argument.
11325 * @param a1 The second extra argument.
11326 * @param a2 The third extra argument.
11327 */
11328#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11329 do { \
11330 IEM_MC_PREPARE_SSE_USAGE(); \
11331 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11332 } while (0)
11333
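/*
 * Illustrative sketch (not part of the build): an SSE binary operation
 * references the destination XMM register read-write and the source
 * read-only, then lets the assembly worker operate directly on the extended
 * state.  pfnWorker, iXRegDst and iXRegSrc are placeholders; the surrounding
 * IEM_MC_BEGIN/IEM_MC_END and RIP advance are omitted.
 *
 * @code
 *      IEM_MC_ARG(uint128_t *,       pDst, 0);
 *      IEM_MC_ARG(uint128_t const *, pSrc, 1);
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnWorker, pDst, pSrc);
 * @endcode
 */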
11334/** @note Not for IOPL or IF testing. */
11335#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11336/** @note Not for IOPL or IF testing. */
11337#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11338/** @note Not for IOPL or IF testing. */
11339#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11340/** @note Not for IOPL or IF testing. */
11341#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11342/** @note Not for IOPL or IF testing. */
11343#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11344 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11345 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11346/** @note Not for IOPL or IF testing. */
11347#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11348 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11349 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11350/** @note Not for IOPL or IF testing. */
11351#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11352 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11353 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11354 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11355/** @note Not for IOPL or IF testing. */
11356#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11357 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11358 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11359 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11360#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11361#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11362#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11363/** @note Not for IOPL or IF testing. */
11364#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11365 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11366 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11367/** @note Not for IOPL or IF testing. */
11368#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11369 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11370 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11371/** @note Not for IOPL or IF testing. */
11372#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11373 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11374 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11375/** @note Not for IOPL or IF testing. */
11376#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11377 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11378 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11379/** @note Not for IOPL or IF testing. */
11380#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11381 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11382 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11383/** @note Not for IOPL or IF testing. */
11384#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11385 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11386 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11387#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11388#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11389
11390#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11391 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11392#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11393 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11394#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11395 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11396#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11397 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11398#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11399 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11400#define IEM_MC_IF_FCW_IM() \
11401 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11402
11403#define IEM_MC_ELSE() } else {
11404#define IEM_MC_ENDIF() } do {} while (0)
11405
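/*
 * Note (informational): each IEM_MC_IF_* macro opens a brace that must be
 * closed by IEM_MC_ELSE()/IEM_MC_ENDIF(), which lets a conditional jump body
 * read like structured code.  Hedged Jcc style sketch, assuming i8Imm was
 * decoded earlier:
 *
 * @code
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 * @endcode
 */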
11406/** @} */
11407
11408
11409/** @name Opcode Debug Helpers.
11410 * @{
11411 */
11412#ifdef VBOX_WITH_STATISTICS
11413# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11414#else
11415# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11416#endif
11417
11418#ifdef DEBUG
11419# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11420 do { \
11421 IEMOP_INC_STATS(a_Stats); \
11422 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11423 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11424 } while (0)
11425#else
11426# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11427#endif
11428
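/*
 * Usage note (informational): each opcode decoder invokes IEMOP_MNEMONIC once
 * at the top with a statistics member name and a disassembly style mnemonic
 * string, along the lines of the hypothetical example below.  In non-DEBUG
 * builds only the statistics counter remains.
 *
 * @code
 *      IEMOP_MNEMONIC(xor_Ev_Gv, "xor Ev,Gv");
 * @endcode
 */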
11429/** @} */
11430
11431
11432/** @name Opcode Helpers.
11433 * @{
11434 */
11435
11436#ifdef IN_RING3
11437# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11438 do { \
11439 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11440 else \
11441 { \
11442 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11443 return IEMOP_RAISE_INVALID_OPCODE(); \
11444 } \
11445 } while (0)
11446#else
11447# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11448 do { \
11449 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11450 else return IEMOP_RAISE_INVALID_OPCODE(); \
11451 } while (0)
11452#endif
11453
11454/** The instruction requires a 186 or later. */
11455#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11456# define IEMOP_HLP_MIN_186() do { } while (0)
11457#else
11458# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11459#endif
11460
11461/** The instruction requires a 286 or later. */
11462#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11463# define IEMOP_HLP_MIN_286() do { } while (0)
11464#else
11465# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11466#endif
11467
11468/** The instruction requires a 386 or later. */
11469#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11470# define IEMOP_HLP_MIN_386() do { } while (0)
11471#else
11472# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11473#endif
11474
11475/** The instruction requires a 386 or later if the given expression is true. */
11476#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11477# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11478#else
11479# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11480#endif
11481
11482/** The instruction requires a 486 or later. */
11483#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11484# define IEMOP_HLP_MIN_486() do { } while (0)
11485#else
11486# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11487#endif
11488
11489/** The instruction requires a Pentium (586) or later. */
11490#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11491# define IEMOP_HLP_MIN_586() do { } while (0)
11492#else
11493# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11494#endif
11495
11496/** The instruction requires a PentiumPro (686) or later. */
11497#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11498# define IEMOP_HLP_MIN_686() do { } while (0)
11499#else
11500# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11501#endif
11502
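/*
 * Usage note (informational): a decoder gates an instruction on the targeted
 * CPU generation by calling the matching helper before doing any other work.
 * Hedged sketch for a 386+ only encoding:
 *
 * @code
 *      FNIEMOP_DEF(iemOp_Some386Insn)
 *      {
 *          IEMOP_HLP_MIN_386();    // raises #UD when targeting an 8086/186/286
 *          ...
 *      }
 * @endcode
 *
 * When IEM_CFG_TARGET_CPU already guarantees that generation at compile time
 * the check compiles away; otherwise IEM_GET_TARGET_CPU(pVCpu) is tested at
 * runtime.
 */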
11503
11504/** The instruction raises an \#UD in real and V8086 mode. */
11505#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11506 do \
11507 { \
11508 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11509 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11510 } while (0)
11511
11512/** The instruction is not available in 64-bit mode; throw \#UD if we're in
11513 * 64-bit mode. */
11514#define IEMOP_HLP_NO_64BIT() \
11515 do \
11516 { \
11517 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11518 return IEMOP_RAISE_INVALID_OPCODE(); \
11519 } while (0)
11520
11521/** The instruction is only available in 64-bit mode; throw \#UD if we're not in
11522 * 64-bit mode. */
11523#define IEMOP_HLP_ONLY_64BIT() \
11524 do \
11525 { \
11526 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11527 return IEMOP_RAISE_INVALID_OPCODE(); \
11528 } while (0)
11529
11530/** The instruction defaults to 64-bit operand size in 64-bit mode. */
11531#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11532 do \
11533 { \
11534 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11535 iemRecalEffOpSize64Default(pVCpu); \
11536 } while (0)
11537
11538/** The instruction has 64-bit operand size in 64-bit mode. */
11539#define IEMOP_HLP_64BIT_OP_SIZE() \
11540 do \
11541 { \
11542 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11543 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11544 } while (0)
11545
11546/** Only a REX prefix immediately preceding the first opcode byte takes
11547 * effect. This macro helps ensure this as well as logging bad guest code. */
11548#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11549 do \
11550 { \
11551 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11552 { \
11553 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11554 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11555 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11556 pVCpu->iem.s.uRexB = 0; \
11557 pVCpu->iem.s.uRexIndex = 0; \
11558 pVCpu->iem.s.uRexReg = 0; \
11559 iemRecalEffOpSize(pVCpu); \
11560 } \
11561 } while (0)
11562
11563/**
11564 * Done decoding.
11565 */
11566#define IEMOP_HLP_DONE_DECODING() \
11567 do \
11568 { \
11569 /*nothing for now, maybe later... */ \
11570 } while (0)
11571
11572/**
11573 * Done decoding, raise \#UD exception if lock prefix present.
11574 */
11575#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11576 do \
11577 { \
11578 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11579 { /* likely */ } \
11580 else \
11581 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11582 } while (0)
11583#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11584 do \
11585 { \
11586 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11587 { /* likely */ } \
11588 else \
11589 { \
11590 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11591 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11592 } \
11593 } while (0)
11594#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11595 do \
11596 { \
11597 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11598 { /* likely */ } \
11599 else \
11600 { \
11601 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11602 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11603 } \
11604 } while (0)
11605
11606/**
11607 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11608 * are present.
11609 */
11610#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11611 do \
11612 { \
11613 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11614 { /* likely */ } \
11615 else \
11616 return IEMOP_RAISE_INVALID_OPCODE(); \
11617 } while (0)
11618
11619
11620/**
11621 * Calculates the effective address of a ModR/M memory operand.
11622 *
11623 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11624 *
11625 * @return Strict VBox status code.
11626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11627 * @param bRm The ModRM byte.
11628 * @param cbImm The size of any immediate following the
11629 * effective address opcode bytes. Important for
11630 * RIP relative addressing.
11631 * @param pGCPtrEff Where to return the effective address.
11632 */
11633IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11634{
11635 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11636 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11637# define SET_SS_DEF() \
11638 do \
11639 { \
11640 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11641 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11642 } while (0)
11643
11644 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11645 {
11646/** @todo Check the effective address size crap! */
11647 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11648 {
11649 uint16_t u16EffAddr;
11650
11651 /* Handle the disp16 form with no registers first. */
11652 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11653 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11654 else
11655 {
11656                /* Get the displacement. */
11657 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11658 {
11659 case 0: u16EffAddr = 0; break;
11660 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11661 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11662 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11663 }
11664
11665 /* Add the base and index registers to the disp. */
11666 switch (bRm & X86_MODRM_RM_MASK)
11667 {
11668 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11669 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11670 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11671 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11672 case 4: u16EffAddr += pCtx->si; break;
11673 case 5: u16EffAddr += pCtx->di; break;
11674 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11675 case 7: u16EffAddr += pCtx->bx; break;
11676 }
11677 }
11678
11679 *pGCPtrEff = u16EffAddr;
11680 }
11681 else
11682 {
11683 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11684 uint32_t u32EffAddr;
11685
11686 /* Handle the disp32 form with no registers first. */
11687 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11688 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11689 else
11690 {
11691 /* Get the register (or SIB) value. */
11692 switch ((bRm & X86_MODRM_RM_MASK))
11693 {
11694 case 0: u32EffAddr = pCtx->eax; break;
11695 case 1: u32EffAddr = pCtx->ecx; break;
11696 case 2: u32EffAddr = pCtx->edx; break;
11697 case 3: u32EffAddr = pCtx->ebx; break;
11698 case 4: /* SIB */
11699 {
11700 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11701
11702 /* Get the index and scale it. */
11703 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11704 {
11705 case 0: u32EffAddr = pCtx->eax; break;
11706 case 1: u32EffAddr = pCtx->ecx; break;
11707 case 2: u32EffAddr = pCtx->edx; break;
11708 case 3: u32EffAddr = pCtx->ebx; break;
11709 case 4: u32EffAddr = 0; /*none */ break;
11710 case 5: u32EffAddr = pCtx->ebp; break;
11711 case 6: u32EffAddr = pCtx->esi; break;
11712 case 7: u32EffAddr = pCtx->edi; break;
11713 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11714 }
11715 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11716
11717 /* add base */
11718 switch (bSib & X86_SIB_BASE_MASK)
11719 {
11720 case 0: u32EffAddr += pCtx->eax; break;
11721 case 1: u32EffAddr += pCtx->ecx; break;
11722 case 2: u32EffAddr += pCtx->edx; break;
11723 case 3: u32EffAddr += pCtx->ebx; break;
11724 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11725 case 5:
11726 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11727 {
11728 u32EffAddr += pCtx->ebp;
11729 SET_SS_DEF();
11730 }
11731 else
11732 {
11733 uint32_t u32Disp;
11734 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11735 u32EffAddr += u32Disp;
11736 }
11737 break;
11738 case 6: u32EffAddr += pCtx->esi; break;
11739 case 7: u32EffAddr += pCtx->edi; break;
11740 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11741 }
11742 break;
11743 }
11744 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11745 case 6: u32EffAddr = pCtx->esi; break;
11746 case 7: u32EffAddr = pCtx->edi; break;
11747 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11748 }
11749
11750 /* Get and add the displacement. */
11751 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11752 {
11753 case 0:
11754 break;
11755 case 1:
11756 {
11757 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11758 u32EffAddr += i8Disp;
11759 break;
11760 }
11761 case 2:
11762 {
11763 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11764 u32EffAddr += u32Disp;
11765 break;
11766 }
11767 default:
11768 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11769 }
11770
11771 }
11772 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11773 *pGCPtrEff = u32EffAddr;
11774 else
11775 {
11776 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11777 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11778 }
11779 }
11780 }
11781 else
11782 {
11783 uint64_t u64EffAddr;
11784
11785 /* Handle the rip+disp32 form with no registers first. */
11786 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11787 {
11788 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11789 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11790 }
11791 else
11792 {
11793 /* Get the register (or SIB) value. */
11794 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11795 {
11796 case 0: u64EffAddr = pCtx->rax; break;
11797 case 1: u64EffAddr = pCtx->rcx; break;
11798 case 2: u64EffAddr = pCtx->rdx; break;
11799 case 3: u64EffAddr = pCtx->rbx; break;
11800 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11801 case 6: u64EffAddr = pCtx->rsi; break;
11802 case 7: u64EffAddr = pCtx->rdi; break;
11803 case 8: u64EffAddr = pCtx->r8; break;
11804 case 9: u64EffAddr = pCtx->r9; break;
11805 case 10: u64EffAddr = pCtx->r10; break;
11806 case 11: u64EffAddr = pCtx->r11; break;
11807 case 13: u64EffAddr = pCtx->r13; break;
11808 case 14: u64EffAddr = pCtx->r14; break;
11809 case 15: u64EffAddr = pCtx->r15; break;
11810 /* SIB */
11811 case 4:
11812 case 12:
11813 {
11814 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11815
11816 /* Get the index and scale it. */
11817 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11818 {
11819 case 0: u64EffAddr = pCtx->rax; break;
11820 case 1: u64EffAddr = pCtx->rcx; break;
11821 case 2: u64EffAddr = pCtx->rdx; break;
11822 case 3: u64EffAddr = pCtx->rbx; break;
11823 case 4: u64EffAddr = 0; /*none */ break;
11824 case 5: u64EffAddr = pCtx->rbp; break;
11825 case 6: u64EffAddr = pCtx->rsi; break;
11826 case 7: u64EffAddr = pCtx->rdi; break;
11827 case 8: u64EffAddr = pCtx->r8; break;
11828 case 9: u64EffAddr = pCtx->r9; break;
11829 case 10: u64EffAddr = pCtx->r10; break;
11830 case 11: u64EffAddr = pCtx->r11; break;
11831 case 12: u64EffAddr = pCtx->r12; break;
11832 case 13: u64EffAddr = pCtx->r13; break;
11833 case 14: u64EffAddr = pCtx->r14; break;
11834 case 15: u64EffAddr = pCtx->r15; break;
11835 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11836 }
11837 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11838
11839 /* add base */
11840 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11841 {
11842 case 0: u64EffAddr += pCtx->rax; break;
11843 case 1: u64EffAddr += pCtx->rcx; break;
11844 case 2: u64EffAddr += pCtx->rdx; break;
11845 case 3: u64EffAddr += pCtx->rbx; break;
11846 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11847 case 6: u64EffAddr += pCtx->rsi; break;
11848 case 7: u64EffAddr += pCtx->rdi; break;
11849 case 8: u64EffAddr += pCtx->r8; break;
11850 case 9: u64EffAddr += pCtx->r9; break;
11851 case 10: u64EffAddr += pCtx->r10; break;
11852 case 11: u64EffAddr += pCtx->r11; break;
11853 case 12: u64EffAddr += pCtx->r12; break;
11854 case 14: u64EffAddr += pCtx->r14; break;
11855 case 15: u64EffAddr += pCtx->r15; break;
11856 /* complicated encodings */
11857 case 5:
11858 case 13:
11859 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11860 {
11861 if (!pVCpu->iem.s.uRexB)
11862 {
11863 u64EffAddr += pCtx->rbp;
11864 SET_SS_DEF();
11865 }
11866 else
11867 u64EffAddr += pCtx->r13;
11868 }
11869 else
11870 {
11871 uint32_t u32Disp;
11872 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11873 u64EffAddr += (int32_t)u32Disp;
11874 }
11875 break;
11876 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11877 }
11878 break;
11879 }
11880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11881 }
11882
11883 /* Get and add the displacement. */
11884 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11885 {
11886 case 0:
11887 break;
11888 case 1:
11889 {
11890 int8_t i8Disp;
11891 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11892 u64EffAddr += i8Disp;
11893 break;
11894 }
11895 case 2:
11896 {
11897 uint32_t u32Disp;
11898 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11899 u64EffAddr += (int32_t)u32Disp;
11900 break;
11901 }
11902 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11903 }
11904
11905 }
11906
11907 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11908 *pGCPtrEff = u64EffAddr;
11909 else
11910 {
11911 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11912 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11913 }
11914 }
11915
11916 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11917 return VINF_SUCCESS;
11918}
11919
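/*
 * Worked example (informational) for the 16-bit path above: bRm=0x46 decodes
 * as mod=1, reg=0, rm=6, i.e. [bp+disp8].  With disp8=0x10 and BP=0x2000 the
 * code computes u16EffAddr = 0x0010 + 0x2000 = 0x2010, and SET_SS_DEF() makes
 * SS the default segment since no segment prefix is in effect.  The special
 * mod=0, rm=6 combination instead takes the disp16-only branch with no base
 * register at all.
 */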
11920
11921/**
11922 * Calculates the effective address of a ModR/M memory operand.
11923 *
11924 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11925 *
11926 * @return Strict VBox status code.
11927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11928 * @param bRm The ModRM byte.
11929 * @param cbImm The size of any immediate following the
11930 * effective address opcode bytes. Important for
11931 * RIP relative addressing.
11932 * @param pGCPtrEff Where to return the effective address.
11933 * @param offRsp RSP displacement.
11934 */
11935IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11936{
11937    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11938 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11939# define SET_SS_DEF() \
11940 do \
11941 { \
11942 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11943 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11944 } while (0)
11945
11946 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11947 {
11948/** @todo Check the effective address size crap! */
11949 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11950 {
11951 uint16_t u16EffAddr;
11952
11953 /* Handle the disp16 form with no registers first. */
11954 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11955 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11956 else
11957 {
11958                /* Get the displacement. */
11959 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11960 {
11961 case 0: u16EffAddr = 0; break;
11962 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11963 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11964 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11965 }
11966
11967 /* Add the base and index registers to the disp. */
11968 switch (bRm & X86_MODRM_RM_MASK)
11969 {
11970 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11971 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11972 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11973 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11974 case 4: u16EffAddr += pCtx->si; break;
11975 case 5: u16EffAddr += pCtx->di; break;
11976 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11977 case 7: u16EffAddr += pCtx->bx; break;
11978 }
11979 }
11980
11981 *pGCPtrEff = u16EffAddr;
11982 }
11983 else
11984 {
11985 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11986 uint32_t u32EffAddr;
11987
11988 /* Handle the disp32 form with no registers first. */
11989 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11990 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11991 else
11992 {
11993 /* Get the register (or SIB) value. */
11994 switch ((bRm & X86_MODRM_RM_MASK))
11995 {
11996 case 0: u32EffAddr = pCtx->eax; break;
11997 case 1: u32EffAddr = pCtx->ecx; break;
11998 case 2: u32EffAddr = pCtx->edx; break;
11999 case 3: u32EffAddr = pCtx->ebx; break;
12000 case 4: /* SIB */
12001 {
12002 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12003
12004 /* Get the index and scale it. */
12005 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12006 {
12007 case 0: u32EffAddr = pCtx->eax; break;
12008 case 1: u32EffAddr = pCtx->ecx; break;
12009 case 2: u32EffAddr = pCtx->edx; break;
12010 case 3: u32EffAddr = pCtx->ebx; break;
12011 case 4: u32EffAddr = 0; /*none */ break;
12012 case 5: u32EffAddr = pCtx->ebp; break;
12013 case 6: u32EffAddr = pCtx->esi; break;
12014 case 7: u32EffAddr = pCtx->edi; break;
12015 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12016 }
12017 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12018
12019 /* add base */
12020 switch (bSib & X86_SIB_BASE_MASK)
12021 {
12022 case 0: u32EffAddr += pCtx->eax; break;
12023 case 1: u32EffAddr += pCtx->ecx; break;
12024 case 2: u32EffAddr += pCtx->edx; break;
12025 case 3: u32EffAddr += pCtx->ebx; break;
12026 case 4:
12027 u32EffAddr += pCtx->esp + offRsp;
12028 SET_SS_DEF();
12029 break;
12030 case 5:
12031 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12032 {
12033 u32EffAddr += pCtx->ebp;
12034 SET_SS_DEF();
12035 }
12036 else
12037 {
12038 uint32_t u32Disp;
12039 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12040 u32EffAddr += u32Disp;
12041 }
12042 break;
12043 case 6: u32EffAddr += pCtx->esi; break;
12044 case 7: u32EffAddr += pCtx->edi; break;
12045 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12046 }
12047 break;
12048 }
12049 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12050 case 6: u32EffAddr = pCtx->esi; break;
12051 case 7: u32EffAddr = pCtx->edi; break;
12052 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12053 }
12054
12055 /* Get and add the displacement. */
12056 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12057 {
12058 case 0:
12059 break;
12060 case 1:
12061 {
12062 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12063 u32EffAddr += i8Disp;
12064 break;
12065 }
12066 case 2:
12067 {
12068 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12069 u32EffAddr += u32Disp;
12070 break;
12071 }
12072 default:
12073 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12074 }
12075
12076 }
12077 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12078 *pGCPtrEff = u32EffAddr;
12079 else
12080 {
12081 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12082 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12083 }
12084 }
12085 }
12086 else
12087 {
12088 uint64_t u64EffAddr;
12089
12090 /* Handle the rip+disp32 form with no registers first. */
12091 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12092 {
12093 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12094 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
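                /* Note: RIP-relative addressing is relative to the first byte of the *next* instruction,
                   so RIP plus the opcode bytes decoded so far (IEM_GET_INSTR_LEN) plus the size of any
                   trailing immediate (cbImm) is added to the signed 32-bit displacement. */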
12095 }
12096 else
12097 {
12098 /* Get the register (or SIB) value. */
12099 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12100 {
12101 case 0: u64EffAddr = pCtx->rax; break;
12102 case 1: u64EffAddr = pCtx->rcx; break;
12103 case 2: u64EffAddr = pCtx->rdx; break;
12104 case 3: u64EffAddr = pCtx->rbx; break;
12105 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12106 case 6: u64EffAddr = pCtx->rsi; break;
12107 case 7: u64EffAddr = pCtx->rdi; break;
12108 case 8: u64EffAddr = pCtx->r8; break;
12109 case 9: u64EffAddr = pCtx->r9; break;
12110 case 10: u64EffAddr = pCtx->r10; break;
12111 case 11: u64EffAddr = pCtx->r11; break;
12112 case 13: u64EffAddr = pCtx->r13; break;
12113 case 14: u64EffAddr = pCtx->r14; break;
12114 case 15: u64EffAddr = pCtx->r15; break;
12115 /* SIB */
12116 case 4:
12117 case 12:
12118 {
12119 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12120
12121 /* Get the index and scale it. */
12122 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12123 {
12124 case 0: u64EffAddr = pCtx->rax; break;
12125 case 1: u64EffAddr = pCtx->rcx; break;
12126 case 2: u64EffAddr = pCtx->rdx; break;
12127 case 3: u64EffAddr = pCtx->rbx; break;
12128 case 4: u64EffAddr = 0; /*none */ break;
12129 case 5: u64EffAddr = pCtx->rbp; break;
12130 case 6: u64EffAddr = pCtx->rsi; break;
12131 case 7: u64EffAddr = pCtx->rdi; break;
12132 case 8: u64EffAddr = pCtx->r8; break;
12133 case 9: u64EffAddr = pCtx->r9; break;
12134 case 10: u64EffAddr = pCtx->r10; break;
12135 case 11: u64EffAddr = pCtx->r11; break;
12136 case 12: u64EffAddr = pCtx->r12; break;
12137 case 13: u64EffAddr = pCtx->r13; break;
12138 case 14: u64EffAddr = pCtx->r14; break;
12139 case 15: u64EffAddr = pCtx->r15; break;
12140 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12141 }
12142 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12143
12144 /* add base */
12145 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12146 {
12147 case 0: u64EffAddr += pCtx->rax; break;
12148 case 1: u64EffAddr += pCtx->rcx; break;
12149 case 2: u64EffAddr += pCtx->rdx; break;
12150 case 3: u64EffAddr += pCtx->rbx; break;
12151 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12152 case 6: u64EffAddr += pCtx->rsi; break;
12153 case 7: u64EffAddr += pCtx->rdi; break;
12154 case 8: u64EffAddr += pCtx->r8; break;
12155 case 9: u64EffAddr += pCtx->r9; break;
12156 case 10: u64EffAddr += pCtx->r10; break;
12157 case 11: u64EffAddr += pCtx->r11; break;
12158 case 12: u64EffAddr += pCtx->r12; break;
12159 case 14: u64EffAddr += pCtx->r14; break;
12160 case 15: u64EffAddr += pCtx->r15; break;
12161 /* complicated encodings */
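                            /* A base encoding of 101b (with or without REX.B) means: mod == 00 -> no base
                               register, a disp32 follows instead; mod != 00 -> rBP (SS default) or r13. */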
12162 case 5:
12163 case 13:
12164 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12165 {
12166 if (!pVCpu->iem.s.uRexB)
12167 {
12168 u64EffAddr += pCtx->rbp;
12169 SET_SS_DEF();
12170 }
12171 else
12172 u64EffAddr += pCtx->r13;
12173 }
12174 else
12175 {
12176 uint32_t u32Disp;
12177 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12178 u64EffAddr += (int32_t)u32Disp;
12179 }
12180 break;
12181 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12182 }
12183 break;
12184 }
12185 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12186 }
12187
12188 /* Get and add the displacement. */
12189 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12190 {
12191 case 0:
12192 break;
12193 case 1:
12194 {
12195 int8_t i8Disp;
12196 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12197 u64EffAddr += i8Disp;
12198 break;
12199 }
12200 case 2:
12201 {
12202 uint32_t u32Disp;
12203 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12204 u64EffAddr += (int32_t)u32Disp;
12205 break;
12206 }
12207 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12208 }
12209
12210 }
12211
12212 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12213 *pGCPtrEff = u64EffAddr;
12214 else
12215 {
12216 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12217 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12218 }
12219 }
12220
12221 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12222 return VINF_SUCCESS;
12223}
12224
12225
12226#ifdef IEM_WITH_SETJMP
12227/**
12228 * Calculates the effective address of a ModR/M memory operand.
12229 *
12230 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12231 *
12232 * May longjmp on internal error.
12233 *
12234 * @return The effective address.
12235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12236 * @param bRm The ModRM byte.
12237 * @param cbImm The size of any immediate following the
12238 * effective address opcode bytes. Important for
12239 * RIP relative addressing.
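 *
 * @note    As an illustration, the bytes 8B 44 B3 08 (mov eax, [ebx+esi*4+8]) reach this
 *          helper with bRm=0x44; the SIB byte 0xB3 and the disp8 of 8 are then fetched
 *          from the opcode stream.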
12240 */
12241IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12242{
12243 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12244 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12245# define SET_SS_DEF() \
12246 do \
12247 { \
12248 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12249 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12250 } while (0)
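    /* Architecturally, memory operands using (R|E)BP, or (R|E)SP as a SIB base, default to the SS
       segment unless a segment prefix overrides it; SET_SS_DEF() applies that default. */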
12251
12252 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12253 {
12254/** @todo Check the effective address size crap! */
12255 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12256 {
12257 uint16_t u16EffAddr;
12258
12259 /* Handle the disp16 form with no registers first. */
12260 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12261 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12262 else
12263 {
12264                /* Get the displacement. */
12265 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12266 {
12267 case 0: u16EffAddr = 0; break;
12268 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12269 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12270 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12271 }
12272
12273 /* Add the base and index registers to the disp. */
12274 switch (bRm & X86_MODRM_RM_MASK)
12275 {
12276 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12277 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12278 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12279 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12280 case 4: u16EffAddr += pCtx->si; break;
12281 case 5: u16EffAddr += pCtx->di; break;
12282 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12283 case 7: u16EffAddr += pCtx->bx; break;
12284 }
12285 }
12286
12287 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12288 return u16EffAddr;
12289 }
12290
12291 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12292 uint32_t u32EffAddr;
12293
12294 /* Handle the disp32 form with no registers first. */
12295 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12296 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12297 else
12298 {
12299 /* Get the register (or SIB) value. */
12300 switch ((bRm & X86_MODRM_RM_MASK))
12301 {
12302 case 0: u32EffAddr = pCtx->eax; break;
12303 case 1: u32EffAddr = pCtx->ecx; break;
12304 case 2: u32EffAddr = pCtx->edx; break;
12305 case 3: u32EffAddr = pCtx->ebx; break;
12306 case 4: /* SIB */
12307 {
12308 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12309
12310 /* Get the index and scale it. */
12311 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12312 {
12313 case 0: u32EffAddr = pCtx->eax; break;
12314 case 1: u32EffAddr = pCtx->ecx; break;
12315 case 2: u32EffAddr = pCtx->edx; break;
12316 case 3: u32EffAddr = pCtx->ebx; break;
12317 case 4: u32EffAddr = 0; /*none */ break;
12318 case 5: u32EffAddr = pCtx->ebp; break;
12319 case 6: u32EffAddr = pCtx->esi; break;
12320 case 7: u32EffAddr = pCtx->edi; break;
12321 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12322 }
12323 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12324
12325 /* add base */
12326 switch (bSib & X86_SIB_BASE_MASK)
12327 {
12328 case 0: u32EffAddr += pCtx->eax; break;
12329 case 1: u32EffAddr += pCtx->ecx; break;
12330 case 2: u32EffAddr += pCtx->edx; break;
12331 case 3: u32EffAddr += pCtx->ebx; break;
12332 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12333 case 5:
12334 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12335 {
12336 u32EffAddr += pCtx->ebp;
12337 SET_SS_DEF();
12338 }
12339 else
12340 {
12341 uint32_t u32Disp;
12342 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12343 u32EffAddr += u32Disp;
12344 }
12345 break;
12346 case 6: u32EffAddr += pCtx->esi; break;
12347 case 7: u32EffAddr += pCtx->edi; break;
12348 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12349 }
12350 break;
12351 }
12352 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12353 case 6: u32EffAddr = pCtx->esi; break;
12354 case 7: u32EffAddr = pCtx->edi; break;
12355 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12356 }
12357
12358 /* Get and add the displacement. */
12359 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12360 {
12361 case 0:
12362 break;
12363 case 1:
12364 {
12365 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12366 u32EffAddr += i8Disp;
12367 break;
12368 }
12369 case 2:
12370 {
12371 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12372 u32EffAddr += u32Disp;
12373 break;
12374 }
12375 default:
12376 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12377 }
12378 }
12379
12380 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12381 {
12382 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12383 return u32EffAddr;
12384 }
12385 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12386 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12387 return u32EffAddr & UINT16_MAX;
12388 }
12389
12390 uint64_t u64EffAddr;
12391
12392 /* Handle the rip+disp32 form with no registers first. */
12393 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12394 {
12395 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12396 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12397 }
12398 else
12399 {
12400 /* Get the register (or SIB) value. */
12401 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12402 {
12403 case 0: u64EffAddr = pCtx->rax; break;
12404 case 1: u64EffAddr = pCtx->rcx; break;
12405 case 2: u64EffAddr = pCtx->rdx; break;
12406 case 3: u64EffAddr = pCtx->rbx; break;
12407 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12408 case 6: u64EffAddr = pCtx->rsi; break;
12409 case 7: u64EffAddr = pCtx->rdi; break;
12410 case 8: u64EffAddr = pCtx->r8; break;
12411 case 9: u64EffAddr = pCtx->r9; break;
12412 case 10: u64EffAddr = pCtx->r10; break;
12413 case 11: u64EffAddr = pCtx->r11; break;
12414 case 13: u64EffAddr = pCtx->r13; break;
12415 case 14: u64EffAddr = pCtx->r14; break;
12416 case 15: u64EffAddr = pCtx->r15; break;
12417 /* SIB */
12418 case 4:
12419 case 12:
12420 {
12421 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12422
12423 /* Get the index and scale it. */
12424 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12425 {
12426 case 0: u64EffAddr = pCtx->rax; break;
12427 case 1: u64EffAddr = pCtx->rcx; break;
12428 case 2: u64EffAddr = pCtx->rdx; break;
12429 case 3: u64EffAddr = pCtx->rbx; break;
12430 case 4: u64EffAddr = 0; /*none */ break;
12431 case 5: u64EffAddr = pCtx->rbp; break;
12432 case 6: u64EffAddr = pCtx->rsi; break;
12433 case 7: u64EffAddr = pCtx->rdi; break;
12434 case 8: u64EffAddr = pCtx->r8; break;
12435 case 9: u64EffAddr = pCtx->r9; break;
12436 case 10: u64EffAddr = pCtx->r10; break;
12437 case 11: u64EffAddr = pCtx->r11; break;
12438 case 12: u64EffAddr = pCtx->r12; break;
12439 case 13: u64EffAddr = pCtx->r13; break;
12440 case 14: u64EffAddr = pCtx->r14; break;
12441 case 15: u64EffAddr = pCtx->r15; break;
12442 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12443 }
12444 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12445
12446 /* add base */
12447 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12448 {
12449 case 0: u64EffAddr += pCtx->rax; break;
12450 case 1: u64EffAddr += pCtx->rcx; break;
12451 case 2: u64EffAddr += pCtx->rdx; break;
12452 case 3: u64EffAddr += pCtx->rbx; break;
12453 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12454 case 6: u64EffAddr += pCtx->rsi; break;
12455 case 7: u64EffAddr += pCtx->rdi; break;
12456 case 8: u64EffAddr += pCtx->r8; break;
12457 case 9: u64EffAddr += pCtx->r9; break;
12458 case 10: u64EffAddr += pCtx->r10; break;
12459 case 11: u64EffAddr += pCtx->r11; break;
12460 case 12: u64EffAddr += pCtx->r12; break;
12461 case 14: u64EffAddr += pCtx->r14; break;
12462 case 15: u64EffAddr += pCtx->r15; break;
12463 /* complicated encodings */
12464 case 5:
12465 case 13:
12466 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12467 {
12468 if (!pVCpu->iem.s.uRexB)
12469 {
12470 u64EffAddr += pCtx->rbp;
12471 SET_SS_DEF();
12472 }
12473 else
12474 u64EffAddr += pCtx->r13;
12475 }
12476 else
12477 {
12478 uint32_t u32Disp;
12479 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12480 u64EffAddr += (int32_t)u32Disp;
12481 }
12482 break;
12483 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12484 }
12485 break;
12486 }
12487 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12488 }
12489
12490 /* Get and add the displacement. */
12491 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12492 {
12493 case 0:
12494 break;
12495 case 1:
12496 {
12497 int8_t i8Disp;
12498 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12499 u64EffAddr += i8Disp;
12500 break;
12501 }
12502 case 2:
12503 {
12504 uint32_t u32Disp;
12505 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12506 u64EffAddr += (int32_t)u32Disp;
12507 break;
12508 }
12509 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12510 }
12511
12512 }
12513
12514 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12515 {
12516 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12517 return u64EffAddr;
12518 }
12519 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12520 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12521 return u64EffAddr & UINT32_MAX;
12522}
12523#endif /* IEM_WITH_SETJMP */
12524
12525
12526/** @} */
12527
12528
12529
12530/*
12531 * Include the instructions
12532 */
12533#include "IEMAllInstructions.cpp.h"
12534
12535
12536
12537
12538#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12539
12540/**
12541 * Sets up execution verification mode.
12542 */
12543IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12544{
12545
12546 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12547
12548 /*
12549 * Always note down the address of the current instruction.
12550 */
12551 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12552 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12553
12554 /*
12555 * Enable verification and/or logging.
12556 */
12557    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12558 if ( fNewNoRem
12559 && ( 0
12560#if 0 /* auto enable on first paged protected mode interrupt */
12561 || ( pOrgCtx->eflags.Bits.u1IF
12562 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12563 && TRPMHasTrap(pVCpu)
12564 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12565#endif
12566#if 0
12567        || ( pOrgCtx->cs.Sel == 0x10
12568            && (   pOrgCtx->rip == 0x90119e3e
12569                || pOrgCtx->rip == 0x901d9810))
12570#endif
12571#if 0 /* Auto enable DSL - FPU stuff. */
12572        || ( pOrgCtx->cs.Sel == 0x10
12573 && (// pOrgCtx->rip == 0xc02ec07f
12574 //|| pOrgCtx->rip == 0xc02ec082
12575 //|| pOrgCtx->rip == 0xc02ec0c9
12576 0
12577 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12578#endif
12579#if 0 /* Auto enable DSL - fstp st0 stuff. */
12580        || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12581#endif
12582#if 0
12583 || pOrgCtx->rip == 0x9022bb3a
12584#endif
12585#if 0
12586 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12587#endif
12588#if 0
12589 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12590 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12591#endif
12592#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12593 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12594 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12595 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12596#endif
12597#if 0 /* NT4SP1 - xadd early boot. */
12598 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12599#endif
12600#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12601 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12602#endif
12603#if 0 /* NT4SP1 - cmpxchg (AMD). */
12604 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12605#endif
12606#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12607 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12608#endif
12609#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12610 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12611
12612#endif
12613#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12614 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12615
12616#endif
12617#if 0 /* NT4SP1 - frstor [ecx] */
12618 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12619#endif
12620#if 0 /* xxxxxx - All long mode code. */
12621 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12622#endif
12623#if 0 /* rep movsq linux 3.7 64-bit boot. */
12624 || (pOrgCtx->rip == 0x0000000000100241)
12625#endif
12626#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12627 || (pOrgCtx->rip == 0x000000000215e240)
12628#endif
12629#if 0 /* DOS's size-overridden iret to v8086. */
12630 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12631#endif
12632 )
12633 )
12634 {
12635 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12636 RTLogFlags(NULL, "enabled");
12637 fNewNoRem = false;
12638 }
12639 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12640 {
12641 pVCpu->iem.s.fNoRem = fNewNoRem;
12642 if (!fNewNoRem)
12643 {
12644 LogAlways(("Enabling verification mode!\n"));
12645 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12646 }
12647 else
12648 LogAlways(("Disabling verification mode!\n"));
12649 }
12650
12651 /*
12652 * Switch state.
12653 */
12654 if (IEM_VERIFICATION_ENABLED(pVCpu))
12655 {
12656 static CPUMCTX s_DebugCtx; /* Ugly! */
12657
12658 s_DebugCtx = *pOrgCtx;
12659 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
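        /* IEM executes against this copy; iemExecVerificationModeCheck() later runs the same
           instruction on the original context via HM or REM and compares the two. */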
12660 }
12661
12662 /*
12663 * See if there is an interrupt pending in TRPM and inject it if we can.
12664 */
12665 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12666 if ( pOrgCtx->eflags.Bits.u1IF
12667 && TRPMHasTrap(pVCpu)
12668 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12669 {
12670 uint8_t u8TrapNo;
12671 TRPMEVENT enmType;
12672 RTGCUINT uErrCode;
12673 RTGCPTR uCr2;
12674 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12675 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12676 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12677 TRPMResetTrap(pVCpu);
12678 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12679 }
12680
12681 /*
12682 * Reset the counters.
12683 */
12684 pVCpu->iem.s.cIOReads = 0;
12685 pVCpu->iem.s.cIOWrites = 0;
12686 pVCpu->iem.s.fIgnoreRaxRdx = false;
12687 pVCpu->iem.s.fOverlappingMovs = false;
12688 pVCpu->iem.s.fProblematicMemory = false;
12689 pVCpu->iem.s.fUndefinedEFlags = 0;
12690
12691 if (IEM_VERIFICATION_ENABLED(pVCpu))
12692 {
12693 /*
12694 * Free all verification records.
12695 */
12696 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12697 pVCpu->iem.s.pIemEvtRecHead = NULL;
12698 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12699 do
12700 {
12701 while (pEvtRec)
12702 {
12703 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12704 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12705 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12706 pEvtRec = pNext;
12707 }
12708 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12709 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12710 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12711 } while (pEvtRec);
12712 }
12713}
12714
12715
12716/**
12717 * Allocate an event record.
12718 * @returns Pointer to a record.
12719 */
12720IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12721{
12722 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12723 return NULL;
12724
12725 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12726 if (pEvtRec)
12727 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12728 else
12729 {
12730 if (!pVCpu->iem.s.ppIemEvtRecNext)
12731 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12732
12733 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12734 if (!pEvtRec)
12735 return NULL;
12736 }
12737 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12738 pEvtRec->pNext = NULL;
12739 return pEvtRec;
12740}
12741
12742
12743/**
12744 * IOMMMIORead notification.
12745 */
12746VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12747{
12748 PVMCPU pVCpu = VMMGetCpu(pVM);
12749 if (!pVCpu)
12750 return;
12751 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12752 if (!pEvtRec)
12753 return;
12754 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12755 pEvtRec->u.RamRead.GCPhys = GCPhys;
12756 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12757 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12758 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12759}
12760
12761
12762/**
12763 * IOMMMIOWrite notification.
12764 */
12765VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12766{
12767 PVMCPU pVCpu = VMMGetCpu(pVM);
12768 if (!pVCpu)
12769 return;
12770 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12771 if (!pEvtRec)
12772 return;
12773 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12774 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12775 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12776 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12777 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12778 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12779 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12780 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12781 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12782}
12783
12784
12785/**
12786 * IOMIOPortRead notification.
12787 */
12788VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12789{
12790 PVMCPU pVCpu = VMMGetCpu(pVM);
12791 if (!pVCpu)
12792 return;
12793 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12794 if (!pEvtRec)
12795 return;
12796 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12797 pEvtRec->u.IOPortRead.Port = Port;
12798 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12799 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12800 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12801}
12802
12803/**
12804 * IOMIOPortWrite notification.
12805 */
12806VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12807{
12808 PVMCPU pVCpu = VMMGetCpu(pVM);
12809 if (!pVCpu)
12810 return;
12811 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12812 if (!pEvtRec)
12813 return;
12814 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12815 pEvtRec->u.IOPortWrite.Port = Port;
12816 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12817 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12818 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12819 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12820}
12821
12822
12823VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12824{
12825 PVMCPU pVCpu = VMMGetCpu(pVM);
12826 if (!pVCpu)
12827 return;
12828 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12829 if (!pEvtRec)
12830 return;
12831 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12832 pEvtRec->u.IOPortStrRead.Port = Port;
12833 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12834 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12835 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12836 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12837}
12838
12839
12840VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12841{
12842 PVMCPU pVCpu = VMMGetCpu(pVM);
12843 if (!pVCpu)
12844 return;
12845 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12846 if (!pEvtRec)
12847 return;
12848 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12849 pEvtRec->u.IOPortStrWrite.Port = Port;
12850 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12851 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12852 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12853 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12854}
12855
12856
12857/**
12858 * Fakes and records an I/O port read.
12859 *
12860 * @returns VINF_SUCCESS.
12861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12862 * @param Port The I/O port.
12863 * @param pu32Value Where to store the fake value.
12864 * @param cbValue The size of the access.
12865 */
12866IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12867{
12868 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12869 if (pEvtRec)
12870 {
12871 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12872 pEvtRec->u.IOPortRead.Port = Port;
12873 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12874 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12875 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12876 }
12877 pVCpu->iem.s.cIOReads++;
12878 *pu32Value = 0xcccccccc;
12879 return VINF_SUCCESS;
12880}
12881
12882
12883/**
12884 * Fakes and records an I/O port write.
12885 *
12886 * @returns VINF_SUCCESS.
12887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12888 * @param Port The I/O port.
12889 * @param u32Value The value being written.
12890 * @param cbValue The size of the access.
12891 */
12892IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12893{
12894 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12895 if (pEvtRec)
12896 {
12897 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12898 pEvtRec->u.IOPortWrite.Port = Port;
12899 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12900 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12901 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12902 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12903 }
12904 pVCpu->iem.s.cIOWrites++;
12905 return VINF_SUCCESS;
12906}
12907
12908
12909/**
12910 * Used to add extra details about a stub case.
12911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12912 */
12913IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12914{
12915 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12916 PVM pVM = pVCpu->CTX_SUFF(pVM);
12917
12918 char szRegs[4096];
12919 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12920 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12921 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12922 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12923 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12924 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12925 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12926 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12927 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12928 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12929 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12930 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12931 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12932 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12933 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12934 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12935 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12936 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12937 " efer=%016VR{efer}\n"
12938 " pat=%016VR{pat}\n"
12939 " sf_mask=%016VR{sf_mask}\n"
12940 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12941 " lstar=%016VR{lstar}\n"
12942 " star=%016VR{star} cstar=%016VR{cstar}\n"
12943 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12944 );
12945
12946 char szInstr1[256];
12947 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12948 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12949 szInstr1, sizeof(szInstr1), NULL);
12950 char szInstr2[256];
12951 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12952 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12953 szInstr2, sizeof(szInstr2), NULL);
12954
12955 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12956}
12957
12958
12959/**
12960 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12961 * dump to the assertion info.
12962 *
12963 * @param pEvtRec The record to dump.
12964 */
12965IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12966{
12967 switch (pEvtRec->enmEvent)
12968 {
12969 case IEMVERIFYEVENT_IOPORT_READ:
12970 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12971                            pEvtRec->u.IOPortRead.Port,
12972                            pEvtRec->u.IOPortRead.cbValue);
12973 break;
12974 case IEMVERIFYEVENT_IOPORT_WRITE:
12975 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12976 pEvtRec->u.IOPortWrite.Port,
12977 pEvtRec->u.IOPortWrite.cbValue,
12978 pEvtRec->u.IOPortWrite.u32Value);
12979 break;
12980 case IEMVERIFYEVENT_IOPORT_STR_READ:
12981 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
12982                            pEvtRec->u.IOPortStrRead.Port,
12983                            pEvtRec->u.IOPortStrRead.cbValue,
12984                            pEvtRec->u.IOPortStrRead.cTransfers);
12985 break;
12986 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12987 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12988 pEvtRec->u.IOPortStrWrite.Port,
12989 pEvtRec->u.IOPortStrWrite.cbValue,
12990 pEvtRec->u.IOPortStrWrite.cTransfers);
12991 break;
12992 case IEMVERIFYEVENT_RAM_READ:
12993 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
12994 pEvtRec->u.RamRead.GCPhys,
12995 pEvtRec->u.RamRead.cb);
12996 break;
12997 case IEMVERIFYEVENT_RAM_WRITE:
12998 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
12999 pEvtRec->u.RamWrite.GCPhys,
13000 pEvtRec->u.RamWrite.cb,
13001 (int)pEvtRec->u.RamWrite.cb,
13002 pEvtRec->u.RamWrite.ab);
13003 break;
13004 default:
13005 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13006 break;
13007 }
13008}
13009
13010
13011/**
13012 * Raises an assertion on the specified records, showing the given message with
13013 * a record dump attached.
13014 *
13015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13016 * @param pEvtRec1 The first record.
13017 * @param pEvtRec2 The second record.
13018 * @param pszMsg The message explaining why we're asserting.
13019 */
13020IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13021{
13022 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13023 iemVerifyAssertAddRecordDump(pEvtRec1);
13024 iemVerifyAssertAddRecordDump(pEvtRec2);
13025 iemVerifyAssertMsg2(pVCpu);
13026 RTAssertPanic();
13027}
13028
13029
13030/**
13031 * Raises an assertion on the specified record, showing the given message with
13032 * a record dump attached.
13033 *
13034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13035 * @param   pEvtRec     The record to dump.
13036 * @param pszMsg The message explaining why we're asserting.
13037 */
13038IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13039{
13040 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13041 iemVerifyAssertAddRecordDump(pEvtRec);
13042 iemVerifyAssertMsg2(pVCpu);
13043 RTAssertPanic();
13044}
13045
13046
13047/**
13048 * Verifies a write record.
13049 *
13050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13051 * @param pEvtRec The write record.
13052 * @param   fRem        Set if REM was doing the other execution; clear if
13053 *                      it was HM.
13054 */
13055IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13056{
13057 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13058 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13059 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13060 if ( RT_FAILURE(rc)
13061 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13062 {
13063 /* fend off ins */
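            /* (Faked I/O port reads return 0xcccccccc, so an INS instruction stores that pattern
                and will necessarily differ from what the other execution engine wrote.) */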
13064 if ( !pVCpu->iem.s.cIOReads
13065 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13066 || ( pEvtRec->u.RamWrite.cb != 1
13067 && pEvtRec->u.RamWrite.cb != 2
13068 && pEvtRec->u.RamWrite.cb != 4) )
13069 {
13070 /* fend off ROMs and MMIO */
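                /* (The two range checks below exclude A0000h..FFFFFh, the legacy VGA/BIOS window,
                    and FFFC0000h..FFFFFFFFh, the firmware mapping at the top of the 32-bit space.) */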
13071 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13072 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13073 {
13074 /* fend off fxsave */
13075 if (pEvtRec->u.RamWrite.cb != 512)
13076 {
13077 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13078 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13079                    RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
13080 RTAssertMsg2Add("%s: %.*Rhxs\n"
13081 "iem: %.*Rhxs\n",
13082 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13083 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13084 iemVerifyAssertAddRecordDump(pEvtRec);
13085 iemVerifyAssertMsg2(pVCpu);
13086 RTAssertPanic();
13087 }
13088 }
13089 }
13090 }
13091
13092}
13093
13094/**
13095 * Performs the post-execution verification checks.
13096 */
13097IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13098{
13099 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13100 return rcStrictIem;
13101
13102 /*
13103 * Switch back the state.
13104 */
13105 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13106 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13107 Assert(pOrgCtx != pDebugCtx);
13108 IEM_GET_CTX(pVCpu) = pOrgCtx;
13109
13110 /*
13111 * Execute the instruction in REM.
13112 */
13113 bool fRem = false;
13114 PVM pVM = pVCpu->CTX_SUFF(pVM);
13115
13116 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13117#ifdef IEM_VERIFICATION_MODE_FULL_HM
13118 if ( HMIsEnabled(pVM)
13119 && pVCpu->iem.s.cIOReads == 0
13120 && pVCpu->iem.s.cIOWrites == 0
13121 && !pVCpu->iem.s.fProblematicMemory)
13122 {
13123 uint64_t uStartRip = pOrgCtx->rip;
13124 unsigned iLoops = 0;
13125 do
13126 {
13127 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13128 iLoops++;
13129 } while ( rc == VINF_SUCCESS
13130 || ( rc == VINF_EM_DBG_STEPPED
13131 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13132 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13133 || ( pOrgCtx->rip != pDebugCtx->rip
13134 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13135 && iLoops < 8) );
13136 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13137 rc = VINF_SUCCESS;
13138 }
13139#endif
13140 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13141 || rc == VINF_IOM_R3_IOPORT_READ
13142 || rc == VINF_IOM_R3_IOPORT_WRITE
13143 || rc == VINF_IOM_R3_MMIO_READ
13144 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13145 || rc == VINF_IOM_R3_MMIO_WRITE
13146 || rc == VINF_CPUM_R3_MSR_READ
13147 || rc == VINF_CPUM_R3_MSR_WRITE
13148 || rc == VINF_EM_RESCHEDULE
13149 )
13150 {
13151 EMRemLock(pVM);
13152 rc = REMR3EmulateInstruction(pVM, pVCpu);
13153 AssertRC(rc);
13154 EMRemUnlock(pVM);
13155 fRem = true;
13156 }
13157
13158# if 1 /* Skip unimplemented instructions for now. */
13159 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13160 {
13161 IEM_GET_CTX(pVCpu) = pOrgCtx;
13162 if (rc == VINF_EM_DBG_STEPPED)
13163 return VINF_SUCCESS;
13164 return rc;
13165 }
13166# endif
13167
13168 /*
13169 * Compare the register states.
13170 */
13171 unsigned cDiffs = 0;
13172 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13173 {
13174 //Log(("REM and IEM ends up with different registers!\n"));
13175 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13176
13177# define CHECK_FIELD(a_Field) \
13178 do \
13179 { \
13180 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13181 { \
13182 switch (sizeof(pOrgCtx->a_Field)) \
13183 { \
13184 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13185 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13186 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13187 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13188 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13189 } \
13190 cDiffs++; \
13191 } \
13192 } while (0)
13193# define CHECK_XSTATE_FIELD(a_Field) \
13194 do \
13195 { \
13196 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13197 { \
13198 switch (sizeof(pOrgXState->a_Field)) \
13199 { \
13200 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13201 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13202 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13203 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13204 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13205 } \
13206 cDiffs++; \
13207 } \
13208 } while (0)
13209
13210# define CHECK_BIT_FIELD(a_Field) \
13211 do \
13212 { \
13213 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13214 { \
13215 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13216 cDiffs++; \
13217 } \
13218 } while (0)
13219
13220# define CHECK_SEL(a_Sel) \
13221 do \
13222 { \
13223 CHECK_FIELD(a_Sel.Sel); \
13224 CHECK_FIELD(a_Sel.Attr.u); \
13225 CHECK_FIELD(a_Sel.u64Base); \
13226 CHECK_FIELD(a_Sel.u32Limit); \
13227 CHECK_FIELD(a_Sel.fFlags); \
13228 } while (0)
13229
13230 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13231 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13232
13233#if 1 /* The recompiler doesn't update these the intel way. */
13234 if (fRem)
13235 {
13236 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13237 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13238 pOrgXState->x87.CS = pDebugXState->x87.CS;
13239 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13240 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13241 pOrgXState->x87.DS = pDebugXState->x87.DS;
13242 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13243 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13244 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13245 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13246 }
13247#endif
13248 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13249 {
13250 RTAssertMsg2Weak(" the FPU state differs\n");
13251 cDiffs++;
13252 CHECK_XSTATE_FIELD(x87.FCW);
13253 CHECK_XSTATE_FIELD(x87.FSW);
13254 CHECK_XSTATE_FIELD(x87.FTW);
13255 CHECK_XSTATE_FIELD(x87.FOP);
13256 CHECK_XSTATE_FIELD(x87.FPUIP);
13257 CHECK_XSTATE_FIELD(x87.CS);
13258 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13259 CHECK_XSTATE_FIELD(x87.FPUDP);
13260 CHECK_XSTATE_FIELD(x87.DS);
13261 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13262 CHECK_XSTATE_FIELD(x87.MXCSR);
13263 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13264 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13265 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13266 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13267 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13268 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13269 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13270 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13271 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13272 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13273 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13274 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13275 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13276 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13277 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13278 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13279 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13280 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13281 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13282 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13283 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13284 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13285 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13286 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13287 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13288 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13289 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13290 }
13291 CHECK_FIELD(rip);
13292 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13293 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13294 {
13295 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13296 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13297 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13298 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13299 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13300 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13301 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13302 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13303 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13304 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13305 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13306 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13307 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13308 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13309 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13310 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13311        if (0 && !fRem) /** @todo debug the occasional cleared RF flag when running against VT-x. */
13312 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13313 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13314 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13315 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13316 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13317 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13318 }
13319
13320 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13321 CHECK_FIELD(rax);
13322 CHECK_FIELD(rcx);
13323 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13324 CHECK_FIELD(rdx);
13325 CHECK_FIELD(rbx);
13326 CHECK_FIELD(rsp);
13327 CHECK_FIELD(rbp);
13328 CHECK_FIELD(rsi);
13329 CHECK_FIELD(rdi);
13330 CHECK_FIELD(r8);
13331 CHECK_FIELD(r9);
13332 CHECK_FIELD(r10);
13333 CHECK_FIELD(r11);
13334 CHECK_FIELD(r12);
13335 CHECK_FIELD(r13);
13336 CHECK_SEL(cs);
13337 CHECK_SEL(ss);
13338 CHECK_SEL(ds);
13339 CHECK_SEL(es);
13340 CHECK_SEL(fs);
13341 CHECK_SEL(gs);
13342 CHECK_FIELD(cr0);
13343
13344        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13345           the faulting instruction first: 001b:77f61ff3 66 8b 42 02   mov ax, word [edx+002h] (NT4SP1) */
13346        /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
13347           while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
13348 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13349 {
13350 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13351 { /* ignore */ }
13352 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13353 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13354 && fRem)
13355 { /* ignore */ }
13356 else
13357 CHECK_FIELD(cr2);
13358 }
13359 CHECK_FIELD(cr3);
13360 CHECK_FIELD(cr4);
13361 CHECK_FIELD(dr[0]);
13362 CHECK_FIELD(dr[1]);
13363 CHECK_FIELD(dr[2]);
13364 CHECK_FIELD(dr[3]);
13365 CHECK_FIELD(dr[6]);
13366 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13367 CHECK_FIELD(dr[7]);
13368 CHECK_FIELD(gdtr.cbGdt);
13369 CHECK_FIELD(gdtr.pGdt);
13370 CHECK_FIELD(idtr.cbIdt);
13371 CHECK_FIELD(idtr.pIdt);
13372 CHECK_SEL(ldtr);
13373 CHECK_SEL(tr);
13374 CHECK_FIELD(SysEnter.cs);
13375 CHECK_FIELD(SysEnter.eip);
13376 CHECK_FIELD(SysEnter.esp);
13377 CHECK_FIELD(msrEFER);
13378 CHECK_FIELD(msrSTAR);
13379 CHECK_FIELD(msrPAT);
13380 CHECK_FIELD(msrLSTAR);
13381 CHECK_FIELD(msrCSTAR);
13382 CHECK_FIELD(msrSFMASK);
13383 CHECK_FIELD(msrKERNELGSBASE);
13384
13385 if (cDiffs != 0)
13386 {
13387 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13388 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13389 RTAssertPanic();
13390 static bool volatile s_fEnterDebugger = true;
13391 if (s_fEnterDebugger)
13392 DBGFSTOP(pVM);
13393
13394# if 1 /* Ignore unimplemented instructions for now. */
13395 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13396 rcStrictIem = VINF_SUCCESS;
13397# endif
13398 }
13399# undef CHECK_FIELD
13400# undef CHECK_BIT_FIELD
13401 }
13402
13403 /*
13404 * If the register state compared fine, check the verification event
13405 * records.
13406 */
13407 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13408 {
13409 /*
13410         * Compare verification event records.
13411 * - I/O port accesses should be a 1:1 match.
13412 */
13413 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13414 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13415 while (pIemRec && pOtherRec)
13416 {
13417            /* Since we might miss RAM writes and reads on the other side, skip over
13418               extra IEM-only RAM records here, verifying any extra writes as we go. */
13419 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13420 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13421 && pIemRec->pNext)
13422 {
13423 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13424 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13425 pIemRec = pIemRec->pNext;
13426 }
13427
13428 /* Do the compare. */
13429 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13430 {
13431 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13432 break;
13433 }
13434 bool fEquals;
13435 switch (pIemRec->enmEvent)
13436 {
13437 case IEMVERIFYEVENT_IOPORT_READ:
13438 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13439 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13440 break;
13441 case IEMVERIFYEVENT_IOPORT_WRITE:
13442 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13443 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13444 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13445 break;
13446 case IEMVERIFYEVENT_IOPORT_STR_READ:
13447 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13448 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13449 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13450 break;
13451 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13452 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13453 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13454 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13455 break;
13456 case IEMVERIFYEVENT_RAM_READ:
13457 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13458 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13459 break;
13460 case IEMVERIFYEVENT_RAM_WRITE:
13461 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13462 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13463 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13464 break;
13465 default:
13466 fEquals = false;
13467 break;
13468 }
13469 if (!fEquals)
13470 {
13471 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13472 break;
13473 }
13474
13475 /* advance */
13476 pIemRec = pIemRec->pNext;
13477 pOtherRec = pOtherRec->pNext;
13478 }
13479
13480 /* Ignore extra writes and reads. */
13481 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13482 {
13483 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13484 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13485 pIemRec = pIemRec->pNext;
13486 }
13487 if (pIemRec != NULL)
13488 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13489 else if (pOtherRec != NULL)
13490 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13491 }
13492 IEM_GET_CTX(pVCpu) = pOrgCtx;
13493
13494 return rcStrictIem;
13495}
13496
13497#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13498
13499/* stubs */
13500IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13501{
13502 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13503 return VERR_INTERNAL_ERROR;
13504}
13505
13506IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13507{
13508 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13509 return VERR_INTERNAL_ERROR;
13510}
13511
13512#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13513
13514
13515#ifdef LOG_ENABLED
13516/**
13517 * Logs the current instruction.
13518 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13519 * @param pCtx The current CPU context.
13520 * @param fSameCtx Set if we have the same context information as the VMM,
13521 * clear if we may have already executed an instruction in
13522 * our debug context. When clear, we assume IEMCPU holds
13523 * valid CPU mode info.
13524 */
13525IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13526{
13527# ifdef IN_RING3
13528 if (LogIs2Enabled())
13529 {
13530 char szInstr[256];
13531 uint32_t cbInstr = 0;
13532 if (fSameCtx)
13533 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13534 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13535 szInstr, sizeof(szInstr), &cbInstr);
13536 else
13537 {
13538 uint32_t fFlags = 0;
13539 switch (pVCpu->iem.s.enmCpuMode)
13540 {
13541 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13542 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13543 case IEMMODE_16BIT:
13544 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13545 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13546 else
13547 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13548 break;
13549 }
13550 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13551 szInstr, sizeof(szInstr), &cbInstr);
13552 }
13553
13554 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13555 Log2(("****\n"
13556 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13557 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13558 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13559 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13560 " %s\n"
13561 ,
13562 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13563 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13564 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13565 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13566 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13567 szInstr));
13568
13569 if (LogIs3Enabled())
13570 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13571 }
13572 else
13573# endif
13574 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13575 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13576 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13577}
13578#endif
13579
13580
13581/**
13582 * Makes status code adjustments (pass up from I/O and access handler)
13583 * as well as maintaining statistics.
13584 *
13585 * @returns Strict VBox status code to pass up.
13586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13587 * @param rcStrict The status from executing an instruction.
13588 */
13589DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13590{
13591 if (rcStrict != VINF_SUCCESS)
13592 {
13593 if (RT_SUCCESS(rcStrict))
13594 {
13595 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13596 || rcStrict == VINF_IOM_R3_IOPORT_READ
13597 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13598 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13599 || rcStrict == VINF_IOM_R3_MMIO_READ
13600 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13601 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13602 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13603 || rcStrict == VINF_CPUM_R3_MSR_READ
13604 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13605 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13606 || rcStrict == VINF_EM_RAW_TO_R3
13607 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13608 /* raw-mode / virt handlers only: */
13609 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13610 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13611 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13612 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13613 || rcStrict == VINF_SELM_SYNC_GDT
13614 || rcStrict == VINF_CSAM_PENDING_ACTION
13615 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13616 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13617/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13618 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
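            /* The pending pass-up status replaces the current informational status only when it is
               more urgent: outside the VINF_EM_FIRST..VINF_EM_LAST range or of higher priority
               (i.e. lower value) than rcStrict. */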
13619 if (rcPassUp == VINF_SUCCESS)
13620 pVCpu->iem.s.cRetInfStatuses++;
13621 else if ( rcPassUp < VINF_EM_FIRST
13622 || rcPassUp > VINF_EM_LAST
13623 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13624 {
13625 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13626 pVCpu->iem.s.cRetPassUpStatus++;
13627 rcStrict = rcPassUp;
13628 }
13629 else
13630 {
13631 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13632 pVCpu->iem.s.cRetInfStatuses++;
13633 }
13634 }
13635 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13636 pVCpu->iem.s.cRetAspectNotImplemented++;
13637 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13638 pVCpu->iem.s.cRetInstrNotImplemented++;
13639#ifdef IEM_VERIFICATION_MODE_FULL
13640 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13641 rcStrict = VINF_SUCCESS;
13642#endif
13643 else
13644 pVCpu->iem.s.cRetErrStatuses++;
13645 }
13646 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13647 {
13648 pVCpu->iem.s.cRetPassUpStatus++;
13649 rcStrict = pVCpu->iem.s.rcPassUp;
13650 }
13651
13652 return rcStrict;
13653}
13654
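/*
 * Illustration only (kept out of the build): a stripped-down version of the
 * pass-up resolution applied above.  The function name is local to this
 * sketch and does not exist elsewhere in IEM; the statistics counters are
 * omitted.  The rule mirrors the code: a pending status wins when it is not
 * an EM scheduling status, or when it is an EM status with a numerically
 * lower (i.e. higher priority) value than the current one.
 */
#if 0
static int iemExamplePassUpRule(int rcPassUp, int rcStrict)
{
    if (rcPassUp == VINF_SUCCESS)
        return rcStrict;        /* nothing pending, keep the instruction status */
    if (   rcPassUp < VINF_EM_FIRST
        || rcPassUp > VINF_EM_LAST
        || rcPassUp < rcStrict)
        return rcPassUp;        /* the pending status takes precedence */
    return rcStrict;            /* the current EM status ranks higher */
}
#endif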
13655
13656/**
13657 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13658 * IEMExecOneWithPrefetchedByPC.
13659 *
13660 * Similar code is found in IEMExecLots.
13661 *
13662 * @return Strict VBox status code.
13663 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13665 * @param fExecuteInhibit If set, execute the instruction following CLI,
13666 * POP SS and MOV SS,GR.
13667 */
13668DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13669{
13670#ifdef IEM_WITH_SETJMP
13671 VBOXSTRICTRC rcStrict;
13672 jmp_buf JmpBuf;
13673 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13674 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13675 if ((rcStrict = setjmp(JmpBuf)) == 0)
13676 {
13677 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13678 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13679 }
13680 else
13681 pVCpu->iem.s.cLongJumps++;
13682 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13683#else
13684 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13685 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13686#endif
13687 if (rcStrict == VINF_SUCCESS)
13688 pVCpu->iem.s.cInstructions++;
13689 if (pVCpu->iem.s.cActiveMappings > 0)
13690 {
13691 Assert(rcStrict != VINF_SUCCESS);
13692 iemMemRollback(pVCpu);
13693 }
13694//#ifdef DEBUG
13695// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13696//#endif
13697
13698 /* Execute the next instruction as well if a cli, pop ss or
13699 mov ss, Gr has just completed successfully. */
13700 if ( fExecuteInhibit
13701 && rcStrict == VINF_SUCCESS
13702 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13703 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13704 {
13705 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13706 if (rcStrict == VINF_SUCCESS)
13707 {
13708#ifdef LOG_ENABLED
13709 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13710#endif
13711#ifdef IEM_WITH_SETJMP
13712 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13713 if ((rcStrict = setjmp(JmpBuf)) == 0)
13714 {
13715 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13716 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13717 }
13718 else
13719 pVCpu->iem.s.cLongJumps++;
13720 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13721#else
13722 IEM_OPCODE_GET_NEXT_U8(&b);
13723 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13724#endif
13725 if (rcStrict == VINF_SUCCESS)
13726 pVCpu->iem.s.cInstructions++;
13727 if (pVCpu->iem.s.cActiveMappings > 0)
13728 {
13729 Assert(rcStrict != VINF_SUCCESS);
13730 iemMemRollback(pVCpu);
13731 }
13732 }
13733 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13734 }
13735
13736 /*
13737 * Return value fiddling, statistics and sanity assertions.
13738 */
13739 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13740
13741 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13742 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13743#if defined(IEM_VERIFICATION_MODE_FULL)
13744 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13745 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13746 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13747 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13748#endif
13749 return rcStrict;
13750}
13751
13752
13753#ifdef IN_RC
13754/**
13755 * Re-enters raw-mode or ensures we return to ring-3.
13756 *
13757 * @returns rcStrict, maybe modified.
13758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13759 * @param pCtx The current CPU context.
13760 * @param rcStrict The status code returned by the interpreter.
13761 */
13762DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13763{
13764 if ( !pVCpu->iem.s.fInPatchCode
13765 && ( rcStrict == VINF_SUCCESS
13766 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13767 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13768 {
13769 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13770 CPUMRawEnter(pVCpu);
13771 else
13772 {
13773 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13774 rcStrict = VINF_EM_RESCHEDULE;
13775 }
13776 }
13777 return rcStrict;
13778}
13779#endif
13780
13781
13782/**
13783 * Execute one instruction.
13784 *
13785 * @return Strict VBox status code.
13786 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13787 */
13788VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13789{
13790#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13791 if (++pVCpu->iem.s.cVerifyDepth == 1)
13792 iemExecVerificationModeSetup(pVCpu);
13793#endif
13794#ifdef LOG_ENABLED
13795 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13796 iemLogCurInstr(pVCpu, pCtx, true);
13797#endif
13798
13799 /*
13800 * Do the decoding and emulation.
13801 */
13802 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13803 if (rcStrict == VINF_SUCCESS)
13804 rcStrict = iemExecOneInner(pVCpu, true);
13805
13806#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13807 /*
13808 * Assert some sanity.
13809 */
13810 if (pVCpu->iem.s.cVerifyDepth == 1)
13811 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13812 pVCpu->iem.s.cVerifyDepth--;
13813#endif
13814#ifdef IN_RC
13815 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13816#endif
13817 if (rcStrict != VINF_SUCCESS)
13818 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13819 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13820 return rcStrict;
13821}
13822
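/*
 * Usage sketch (an assumption, not how EM actually drives IEM): a caller on
 * the EMT could single-step the guest with IEMExecOne and hand any status
 * other than VINF_SUCCESS back to its own scheduler.  iemExampleStepOnce is
 * a hypothetical helper that does not exist in the code base.
 */
#if 0
static VBOXSTRICTRC iemExampleStepOnce(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("iemExampleStepOnce: IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif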
13823
13824VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13825{
13826 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13827 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13828
13829 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13830 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13831 if (rcStrict == VINF_SUCCESS)
13832 {
13833 rcStrict = iemExecOneInner(pVCpu, true);
13834 if (pcbWritten)
13835 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13836 }
13837
13838#ifdef IN_RC
13839 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13840#endif
13841 return rcStrict;
13842}
13843
13844
13845VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13846 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13847{
13848 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13849 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13850
13851 VBOXSTRICTRC rcStrict;
13852 if ( cbOpcodeBytes
13853 && pCtx->rip == OpcodeBytesPC)
13854 {
13855 iemInitDecoder(pVCpu, false);
13856#ifdef IEM_WITH_CODE_TLB
13857 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13858 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13859 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13860 pVCpu->iem.s.offCurInstrStart = 0;
13861 pVCpu->iem.s.offInstrNextByte = 0;
13862#else
13863 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13864 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13865#endif
13866 rcStrict = VINF_SUCCESS;
13867 }
13868 else
13869 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13870 if (rcStrict == VINF_SUCCESS)
13871 {
13872 rcStrict = iemExecOneInner(pVCpu, true);
13873 }
13874
13875#ifdef IN_RC
13876 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13877#endif
13878 return rcStrict;
13879}
13880
13881
13882VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13883{
13884 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13885 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13886
13887 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13888 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13889 if (rcStrict == VINF_SUCCESS)
13890 {
13891 rcStrict = iemExecOneInner(pVCpu, false);
13892 if (pcbWritten)
13893 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13894 }
13895
13896#ifdef IN_RC
13897 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13898#endif
13899 return rcStrict;
13900}
13901
13902
13903VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13904 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13905{
13906 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13907 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13908
13909 VBOXSTRICTRC rcStrict;
13910 if ( cbOpcodeBytes
13911 && pCtx->rip == OpcodeBytesPC)
13912 {
13913 iemInitDecoder(pVCpu, true);
13914#ifdef IEM_WITH_CODE_TLB
13915 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13916 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13917 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13918 pVCpu->iem.s.offCurInstrStart = 0;
13919 pVCpu->iem.s.offInstrNextByte = 0;
13920#else
13921 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13922 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13923#endif
13924 rcStrict = VINF_SUCCESS;
13925 }
13926 else
13927 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13928 if (rcStrict == VINF_SUCCESS)
13929 rcStrict = iemExecOneInner(pVCpu, false);
13930
13931#ifdef IN_RC
13932 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13933#endif
13934 return rcStrict;
13935}
13936
13937
13938/**
13939 * For debugging DISGetParamSize; may come in handy.
13940 *
13941 * @returns Strict VBox status code.
13942 * @param pVCpu The cross context virtual CPU structure of the
13943 * calling EMT.
13944 * @param pCtxCore The context core structure.
13945 * @param OpcodeBytesPC The PC of the opcode bytes.
13946 * @param pvOpcodeBytes Prefetched opcode bytes.
13947 * @param cbOpcodeBytes Number of prefetched bytes.
13948 * @param pcbWritten Where to return the number of bytes written.
13949 * Optional.
13950 */
13951VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13952 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13953 uint32_t *pcbWritten)
13954{
13955 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13956 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13957
13958 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13959 VBOXSTRICTRC rcStrict;
13960 if ( cbOpcodeBytes
13961 && pCtx->rip == OpcodeBytesPC)
13962 {
13963 iemInitDecoder(pVCpu, true);
13964#ifdef IEM_WITH_CODE_TLB
13965 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13966 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13967 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13968 pVCpu->iem.s.offCurInstrStart = 0;
13969 pVCpu->iem.s.offInstrNextByte = 0;
13970#else
13971 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13972 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13973#endif
13974 rcStrict = VINF_SUCCESS;
13975 }
13976 else
13977 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13978 if (rcStrict == VINF_SUCCESS)
13979 {
13980 rcStrict = iemExecOneInner(pVCpu, false);
13981 if (pcbWritten)
13982 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13983 }
13984
13985#ifdef IN_RC
13986 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13987#endif
13988 return rcStrict;
13989}
13990
13991
13992VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
13993{
13994 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
13995
13996#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13997 /*
13998 * See if there is an interrupt pending in TRPM, inject it if we can.
13999 */
14000 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14001# ifdef IEM_VERIFICATION_MODE_FULL
14002 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14003# endif
14004 if ( pCtx->eflags.Bits.u1IF
14005 && TRPMHasTrap(pVCpu)
14006 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14007 {
14008 uint8_t u8TrapNo;
14009 TRPMEVENT enmType;
14010 RTGCUINT uErrCode;
14011 RTGCPTR uCr2;
14012 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14013 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14014 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14015 TRPMResetTrap(pVCpu);
14016 }
14017
14018 /*
14019 * Log the state.
14020 */
14021# ifdef LOG_ENABLED
14022 iemLogCurInstr(pVCpu, pCtx, true);
14023# endif
14024
14025 /*
14026 * Do the decoding and emulation.
14027 */
14028 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14029 if (rcStrict == VINF_SUCCESS)
14030 rcStrict = iemExecOneInner(pVCpu, true);
14031
14032 /*
14033 * Assert some sanity.
14034 */
14035 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14036
14037 /*
14038 * Log and return.
14039 */
14040 if (rcStrict != VINF_SUCCESS)
14041 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14042 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14043 if (pcInstructions)
14044 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14045 return rcStrict;
14046
14047#else /* Not verification mode */
14048
14049 /*
14050 * See if there is an interrupt pending in TRPM, inject it if we can.
14051 */
14052 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14053# ifdef IEM_VERIFICATION_MODE_FULL
14054 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14055# endif
14056 if ( pCtx->eflags.Bits.u1IF
14057 && TRPMHasTrap(pVCpu)
14058 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14059 {
14060 uint8_t u8TrapNo;
14061 TRPMEVENT enmType;
14062 RTGCUINT uErrCode;
14063 RTGCPTR uCr2;
14064 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14065 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14066 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14067 TRPMResetTrap(pVCpu);
14068 }
14069
14070 /*
14071 * Initial decoder init w/ prefetch, then setup setjmp.
14072 */
14073 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14074 if (rcStrict == VINF_SUCCESS)
14075 {
14076# ifdef IEM_WITH_SETJMP
14077 jmp_buf JmpBuf;
14078 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14079 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14080 pVCpu->iem.s.cActiveMappings = 0;
14081 if ((rcStrict = setjmp(JmpBuf)) == 0)
14082# endif
14083 {
14084 /*
14085 * The run loop. We limit ourselves to 4096 instructions right now.
14086 */
14087 PVM pVM = pVCpu->CTX_SUFF(pVM);
14088 uint32_t cInstr = 4096;
14089 for (;;)
14090 {
14091 /*
14092 * Log the state.
14093 */
14094# ifdef LOG_ENABLED
14095 iemLogCurInstr(pVCpu, pCtx, true);
14096# endif
14097
14098 /*
14099 * Do the decoding and emulation.
14100 */
14101 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14102 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14103 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14104 {
14105 Assert(pVCpu->iem.s.cActiveMappings == 0);
14106 pVCpu->iem.s.cInstructions++;
14107 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14108 {
14109 uint32_t fCpu = pVCpu->fLocalForcedActions
14110 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14111 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14112 | VMCPU_FF_TLB_FLUSH
14113# ifdef VBOX_WITH_RAW_MODE
14114 | VMCPU_FF_TRPM_SYNC_IDT
14115 | VMCPU_FF_SELM_SYNC_TSS
14116 | VMCPU_FF_SELM_SYNC_GDT
14117 | VMCPU_FF_SELM_SYNC_LDT
14118# endif
14119 | VMCPU_FF_INHIBIT_INTERRUPTS
14120 | VMCPU_FF_BLOCK_NMIS ));
14121
14122 if (RT_LIKELY( ( !fCpu
14123 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14124 && !pCtx->rflags.Bits.u1IF) )
14125 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14126 {
14127 if (cInstr-- > 0)
14128 {
14129 Assert(pVCpu->iem.s.cActiveMappings == 0);
14130 iemReInitDecoder(pVCpu);
14131 continue;
14132 }
14133 }
14134 }
14135 Assert(pVCpu->iem.s.cActiveMappings == 0);
14136 }
14137 else if (pVCpu->iem.s.cActiveMappings > 0)
14138 iemMemRollback(pVCpu);
14139 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14140 break;
14141 }
14142 }
14143# ifdef IEM_WITH_SETJMP
14144 else
14145 {
14146 if (pVCpu->iem.s.cActiveMappings > 0)
14147 iemMemRollback(pVCpu);
14148 pVCpu->iem.s.cLongJumps++;
14149 }
14150 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14151# endif
14152
14153 /*
14154 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14155 */
14156 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14157 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14158# if defined(IEM_VERIFICATION_MODE_FULL)
14159 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14160 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14161 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14162 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14163# endif
14164 }
14165
14166 /*
14167 * Maybe re-enter raw-mode and log.
14168 */
14169# ifdef IN_RC
14170 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14171# endif
14172 if (rcStrict != VINF_SUCCESS)
14173 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14174 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14175 if (pcInstructions)
14176 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14177 return rcStrict;
14178#endif /* Not verification mode */
14179}
14180
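/*
 * Usage sketch (an assumption, not the actual EM loop): IEMExecLots batches
 * up to 4096 instructions per call, so a caller would typically invoke it in
 * a loop and act on the returned scheduling status.  The helper name is
 * hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleRunLots(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("iemExampleRunLots: %u instructions -> %Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif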
14181
14182
14183/**
14184 * Injects a trap, fault, abort, software interrupt or external interrupt.
14185 *
14186 * The parameter list matches TRPMQueryTrapAll pretty closely.
14187 *
14188 * @returns Strict VBox status code.
14189 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14190 * @param u8TrapNo The trap number.
14191 * @param enmType What type is it (trap/fault/abort), software
14192 * interrupt or hardware interrupt.
14193 * @param uErrCode The error code if applicable.
14194 * @param uCr2 The CR2 value if applicable.
14195 * @param cbInstr The instruction length (only relevant for
14196 * software interrupts).
14197 */
14198VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14199 uint8_t cbInstr)
14200{
14201 iemInitDecoder(pVCpu, false);
14202#ifdef DBGFTRACE_ENABLED
14203 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14204 u8TrapNo, enmType, uErrCode, uCr2);
14205#endif
14206
14207 uint32_t fFlags;
14208 switch (enmType)
14209 {
14210 case TRPM_HARDWARE_INT:
14211 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14212 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14213 uErrCode = uCr2 = 0;
14214 break;
14215
14216 case TRPM_SOFTWARE_INT:
14217 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14218 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14219 uErrCode = uCr2 = 0;
14220 break;
14221
14222 case TRPM_TRAP:
14223 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14224 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14225 if (u8TrapNo == X86_XCPT_PF)
14226 fFlags |= IEM_XCPT_FLAGS_CR2;
14227 switch (u8TrapNo)
14228 {
14229 case X86_XCPT_DF:
14230 case X86_XCPT_TS:
14231 case X86_XCPT_NP:
14232 case X86_XCPT_SS:
14233 case X86_XCPT_PF:
14234 case X86_XCPT_AC:
14235 fFlags |= IEM_XCPT_FLAGS_ERR;
14236 break;
14237
14238 case X86_XCPT_NMI:
14239 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14240 break;
14241 }
14242 break;
14243
14244 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14245 }
14246
14247 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14248}
14249
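/*
 * Usage sketch with made-up values: injecting a page fault the way a caller
 * translating a TRPM event might.  GCPtrFault is only a placeholder for the
 * faulting address; the error code bits are the standard X86_TRAP_PF_* flags.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                         X86_TRAP_PF_P | X86_TRAP_PF_RW /* uErrCode */,
                         GCPtrFault /* uCr2 */, 0 /* cbInstr */);
}
#endif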
14250
14251/**
14252 * Injects the active TRPM event.
14253 *
14254 * @returns Strict VBox status code.
14255 * @param pVCpu The cross context virtual CPU structure.
14256 */
14257VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14258{
14259#ifndef IEM_IMPLEMENTS_TASKSWITCH
14260 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14261#else
14262 uint8_t u8TrapNo;
14263 TRPMEVENT enmType;
14264 RTGCUINT uErrCode;
14265 RTGCUINTPTR uCr2;
14266 uint8_t cbInstr;
14267 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14268 if (RT_FAILURE(rc))
14269 return rc;
14270
14271 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14272
14273 /** @todo Are there any other codes that imply the event was successfully
14274 * delivered to the guest? See @bugref{6607}. */
14275 if ( rcStrict == VINF_SUCCESS
14276 || rcStrict == VINF_IEM_RAISED_XCPT)
14277 {
14278 TRPMResetTrap(pVCpu);
14279 }
14280 return rcStrict;
14281#endif
14282}
14283
14284
14285VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14286{
14287 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14288 return VERR_NOT_IMPLEMENTED;
14289}
14290
14291
14292VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14293{
14294 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14295 return VERR_NOT_IMPLEMENTED;
14296}
14297
14298
14299#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14300/**
14301 * Executes an IRET instruction with the default operand size.
14302 *
14303 * This is for PATM.
14304 *
14305 * @returns VBox status code.
14306 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14307 * @param pCtxCore The register frame.
14308 */
14309VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14310{
14311 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14312
14313 iemCtxCoreToCtx(pCtx, pCtxCore);
14314 iemInitDecoder(pVCpu);
14315 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14316 if (rcStrict == VINF_SUCCESS)
14317 iemCtxToCtxCore(pCtxCore, pCtx);
14318 else
14319 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14320 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14321 return rcStrict;
14322}
14323#endif
14324
14325
14326/**
14327 * Macro used by the IEMExec* method to check the given instruction length.
14328 *
14329 * Will return on failure!
14330 *
14331 * @param a_cbInstr The given instruction length.
14332 * @param a_cbMin The minimum length.
14333 */
14334#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14335 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14336 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14337
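/*
 * Illustration only: the single unsigned compare above is equivalent to the
 * range check a_cbMin <= a_cbInstr <= 15, because values below a_cbMin wrap
 * around to huge unsigned numbers.  A few compile-time samples with
 * a_cbMin = 2 (kept out of the build):
 */
#if 0
AssertCompile(  (unsigned)2  - (unsigned)2 <= (unsigned)15 - (unsigned)2);    /* cbInstr=2:  accepted */
AssertCompile(  (unsigned)15 - (unsigned)2 <= (unsigned)15 - (unsigned)2);    /* cbInstr=15: accepted */
AssertCompile(!((unsigned)1  - (unsigned)2 <= (unsigned)15 - (unsigned)2));   /* cbInstr=1:  wraps, rejected */
AssertCompile(!((unsigned)16 - (unsigned)2 <= (unsigned)15 - (unsigned)2));   /* cbInstr=16: rejected */
#endif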
14338
14339/**
14340 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14341 *
14342 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14343 *
14344 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14346 * @param rcStrict The status code to fiddle.
14347 */
14348DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14349{
14350 iemUninitExec(pVCpu);
14351#ifdef IN_RC
14352 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14353 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14354#else
14355 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14356#endif
14357}
14358
14359
14360/**
14361 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14362 *
14363 * This API ASSUMES that the caller has already verified that the guest code is
14364 * allowed to access the I/O port. (The I/O port is in the DX register in the
14365 * guest state.)
14366 *
14367 * @returns Strict VBox status code.
14368 * @param pVCpu The cross context virtual CPU structure.
14369 * @param cbValue The size of the I/O port access (1, 2, or 4).
14370 * @param enmAddrMode The addressing mode.
14371 * @param fRepPrefix Indicates whether a repeat prefix is used
14372 * (doesn't matter which for this instruction).
14373 * @param cbInstr The instruction length in bytes.
14374 * @param iEffSeg The effective segment register number.
14375 * @param fIoChecked Whether the access to the I/O port has been
14376 * checked or not. It's typically checked in the
14377 * HM scenario.
14378 */
14379VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14380 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14381{
14382 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14383 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14384
14385 /*
14386 * State init.
14387 */
14388 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14389
14390 /*
14391 * Switch orgy for getting to the right handler.
14392 */
14393 VBOXSTRICTRC rcStrict;
14394 if (fRepPrefix)
14395 {
14396 switch (enmAddrMode)
14397 {
14398 case IEMMODE_16BIT:
14399 switch (cbValue)
14400 {
14401 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14402 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14403 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14404 default:
14405 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14406 }
14407 break;
14408
14409 case IEMMODE_32BIT:
14410 switch (cbValue)
14411 {
14412 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14413 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14414 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14415 default:
14416 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14417 }
14418 break;
14419
14420 case IEMMODE_64BIT:
14421 switch (cbValue)
14422 {
14423 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14424 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14425 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14426 default:
14427 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14428 }
14429 break;
14430
14431 default:
14432 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14433 }
14434 }
14435 else
14436 {
14437 switch (enmAddrMode)
14438 {
14439 case IEMMODE_16BIT:
14440 switch (cbValue)
14441 {
14442 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14443 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14444 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14445 default:
14446 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14447 }
14448 break;
14449
14450 case IEMMODE_32BIT:
14451 switch (cbValue)
14452 {
14453 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14454 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14455 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14456 default:
14457 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14458 }
14459 break;
14460
14461 case IEMMODE_64BIT:
14462 switch (cbValue)
14463 {
14464 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14465 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14466 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14467 default:
14468 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14469 }
14470 break;
14471
14472 default:
14473 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14474 }
14475 }
14476
14477 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14478}
14479
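/*
 * Usage sketch with assumed values: how an HM exit handler might forward a
 * "rep outsb" (0xF3 0x6E, two bytes) executed in 32-bit code with the DS
 * segment, after it has already validated the I/O port access.  The helper
 * name is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleForwardRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS, true /*fIoChecked*/);
}
#endif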
14480
14481/**
14482 * Interface for HM and EM for executing string I/O IN (read) instructions.
14483 *
14484 * This API ASSUMES that the caller has already verified that the guest code is
14485 * allowed to access the I/O port. (The I/O port is in the DX register in the
14486 * guest state.)
14487 *
14488 * @returns Strict VBox status code.
14489 * @param pVCpu The cross context virtual CPU structure.
14490 * @param cbValue The size of the I/O port access (1, 2, or 4).
14491 * @param enmAddrMode The addressing mode.
14492 * @param fRepPrefix Indicates whether a repeat prefix is used
14493 * (doesn't matter which for this instruction).
14494 * @param cbInstr The instruction length in bytes.
14495 * @param fIoChecked Whether the access to the I/O port has been
14496 * checked or not. It's typically checked in the
14497 * HM scenario.
14498 */
14499VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14500 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14501{
14502 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14503
14504 /*
14505 * State init.
14506 */
14507 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14508
14509 /*
14510 * Switch orgy for getting to the right handler.
14511 */
14512 VBOXSTRICTRC rcStrict;
14513 if (fRepPrefix)
14514 {
14515 switch (enmAddrMode)
14516 {
14517 case IEMMODE_16BIT:
14518 switch (cbValue)
14519 {
14520 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14521 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14522 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14523 default:
14524 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14525 }
14526 break;
14527
14528 case IEMMODE_32BIT:
14529 switch (cbValue)
14530 {
14531 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14532 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14533 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14534 default:
14535 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14536 }
14537 break;
14538
14539 case IEMMODE_64BIT:
14540 switch (cbValue)
14541 {
14542 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14543 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14544 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14545 default:
14546 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14547 }
14548 break;
14549
14550 default:
14551 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14552 }
14553 }
14554 else
14555 {
14556 switch (enmAddrMode)
14557 {
14558 case IEMMODE_16BIT:
14559 switch (cbValue)
14560 {
14561 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14562 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14563 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14564 default:
14565 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14566 }
14567 break;
14568
14569 case IEMMODE_32BIT:
14570 switch (cbValue)
14571 {
14572 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14573 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14574 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14575 default:
14576 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14577 }
14578 break;
14579
14580 case IEMMODE_64BIT:
14581 switch (cbValue)
14582 {
14583 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14584 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14585 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14586 default:
14587 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14588 }
14589 break;
14590
14591 default:
14592 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14593 }
14594 }
14595
14596 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14597}
14598
14599
14600/**
14601 * Interface for raw-mode to execute a decoded OUT instruction.
14602 *
14603 * @returns Strict VBox status code.
14604 * @param pVCpu The cross context virtual CPU structure.
14605 * @param cbInstr The instruction length in bytes.
14606 * @param u16Port The port to write to.
14607 * @param cbReg The register size.
14608 *
14609 * @remarks In ring-0 not all of the state needs to be synced in.
14610 */
14611VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14612{
14613 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14614 Assert(cbReg <= 4 && cbReg != 3);
14615
14616 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14617 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14618 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14619}
14620
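/*
 * Usage sketch with assumed values: forwarding a decoded "out dx, al"
 * (opcode 0xEE, one byte).  u16Port would come from the caller's exit or
 * patch record and is only a placeholder here, as is the helper name.
 */
#if 0
static VBOXSTRICTRC iemExampleDecodedOutDxAl(PVMCPU pVCpu, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, 1 /*cbReg*/);
}
#endif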
14621
14622/**
14623 * Interface for raw-mode to execute a decoded IN instruction.
14624 *
14625 * @returns Strict VBox status code.
14626 * @param pVCpu The cross context virtual CPU structure.
14627 * @param cbInstr The instruction length in bytes.
14628 * @param u16Port The port to read.
14629 * @param cbReg The register size.
14630 */
14631VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14632{
14633 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14634 Assert(cbReg <= 4 && cbReg != 3);
14635
14636 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14637 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14638 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14639}
14640
14641
14642/**
14643 * Interface for HM and EM to write to a CRx register.
14644 *
14645 * @returns Strict VBox status code.
14646 * @param pVCpu The cross context virtual CPU structure.
14647 * @param cbInstr The instruction length in bytes.
14648 * @param iCrReg The control register number (destination).
14649 * @param iGReg The general purpose register number (source).
14650 *
14651 * @remarks In ring-0 not all of the state needs to be synced in.
14652 */
14653VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14654{
14655 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14656 Assert(iCrReg < 16);
14657 Assert(iGReg < 16);
14658
14659 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14660 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14661 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14662}
14663
14664
14665/**
14666 * Interface for HM and EM to read from a CRx register.
14667 *
14668 * @returns Strict VBox status code.
14669 * @param pVCpu The cross context virtual CPU structure.
14670 * @param cbInstr The instruction length in bytes.
14671 * @param iGReg The general purpose register number (destination).
14672 * @param iCrReg The control register number (source).
14673 *
14674 * @remarks In ring-0 not all of the state needs to be synced in.
14675 */
14676VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14677{
14678 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14679 Assert(iCrReg < 16);
14680 Assert(iGReg < 16);
14681
14682 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14683 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14684 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14685}
14686
14687
14688/**
14689 * Interface for HM and EM to clear the CR0[TS] bit.
14690 *
14691 * @returns Strict VBox status code.
14692 * @param pVCpu The cross context virtual CPU structure.
14693 * @param cbInstr The instruction length in bytes.
14694 *
14695 * @remarks In ring-0 not all of the state needs to be synced in.
14696 */
14697VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14698{
14699 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14700
14701 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14702 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14703 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14704}
14705
14706
14707/**
14708 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14709 *
14710 * @returns Strict VBox status code.
14711 * @param pVCpu The cross context virtual CPU structure.
14712 * @param cbInstr The instruction length in bytes.
14713 * @param uValue The value to load into CR0.
14714 *
14715 * @remarks In ring-0 not all of the state needs to be synced in.
14716 */
14717VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14718{
14719 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14720
14721 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14722 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14723 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14724}
14725
14726
14727/**
14728 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14729 *
14730 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14731 *
14732 * @returns Strict VBox status code.
14733 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14734 * @param cbInstr The instruction length in bytes.
14735 * @remarks In ring-0 not all of the state needs to be synced in.
14736 * @thread EMT(pVCpu)
14737 */
14738VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14739{
14740 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14741
14742 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14743 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14744 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14745}
14746
14747#ifdef IN_RING3
14748
14749/**
14750 * Handles the unlikely and probably fatal merge cases.
14751 *
14752 * @returns Merged status code.
14753 * @param rcStrict Current EM status code.
14754 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14755 * with @a rcStrict.
14756 * @param iMemMap The memory mapping index. For error reporting only.
14757 * @param pVCpu The cross context virtual CPU structure of the calling
14758 * thread, for error reporting only.
14759 */
14760DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14761 unsigned iMemMap, PVMCPU pVCpu)
14762{
14763 if (RT_FAILURE_NP(rcStrict))
14764 return rcStrict;
14765
14766 if (RT_FAILURE_NP(rcStrictCommit))
14767 return rcStrictCommit;
14768
14769 if (rcStrict == rcStrictCommit)
14770 return rcStrictCommit;
14771
14772 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14773 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14774 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14775 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14776 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14777 return VERR_IOM_FF_STATUS_IPE;
14778}
14779
14780
14781/**
14782 * Helper for IOMR3ProcessForceFlag.
14783 *
14784 * @returns Merged status code.
14785 * @param rcStrict Current EM status code.
14786 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14787 * with @a rcStrict.
14788 * @param iMemMap The memory mapping index. For error reporting only.
14789 * @param pVCpu The cross context virtual CPU structure of the calling
14790 * thread, for error reporting only.
14791 */
14792DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14793{
14794 /* Simple. */
14795 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14796 return rcStrictCommit;
14797
14798 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14799 return rcStrict;
14800
14801 /* EM scheduling status codes. */
14802 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14803 && rcStrict <= VINF_EM_LAST))
14804 {
14805 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14806 && rcStrictCommit <= VINF_EM_LAST))
14807 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14808 }
14809
14810 /* Unlikely */
14811 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14812}
14813
14814
14815/**
14816 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14817 *
14818 * @returns Merge between @a rcStrict and what the commit operation returned.
14819 * @param pVM The cross context VM structure.
14820 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14821 * @param rcStrict The status code returned by ring-0 or raw-mode.
14822 */
14823VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14824{
14825 /*
14826 * Reset the pending commit.
14827 */
14828 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14829 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14830 ("%#x %#x %#x\n",
14831 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14832 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14833
14834 /*
14835 * Commit the pending bounce buffers (usually just one).
14836 */
14837 unsigned cBufs = 0;
14838 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14839 while (iMemMap-- > 0)
14840 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14841 {
14842 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14843 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14844 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14845
14846 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14847 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14848 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14849
14850 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14851 {
14852 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14853 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14854 pbBuf,
14855 cbFirst,
14856 PGMACCESSORIGIN_IEM);
14857 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14858 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14859 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14860 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14861 }
14862
14863 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14864 {
14865 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14866 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14867 pbBuf + cbFirst,
14868 cbSecond,
14869 PGMACCESSORIGIN_IEM);
14870 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14871 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14872 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14873 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14874 }
14875 cBufs++;
14876 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14877 }
14878
14879 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14880 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14881 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14882 pVCpu->iem.s.cActiveMappings = 0;
14883 return rcStrict;
14884}
14885
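/*
 * Usage sketch (an assumption about the caller, which lives in EM): after
 * getting back to ring-3 with VMCPU_FF_IEM set, the force-flag processing
 * would hand its current status to IEMR3ProcessForceFlag and continue with
 * the merged result.  The helper name is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleHandleIemFF(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif
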
14886#endif /* IN_RING3 */
14887